grsecurity-3.0-3.16.1-201409010104.patch
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 9de9813..1462492 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -3,9 +3,11 @@
 *.bc
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -15,6 +17,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -51,14 +54,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -72,9 +78,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -83,6 +91,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -95,32 +104,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -148,14 +168,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -165,14 +185,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -188,6 +209,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -197,6 +220,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -206,7 +230,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -216,8 +245,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -227,6 +260,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -238,13 +272,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -252,9 +290,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b7fa2f5..90cd9f8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1138,6 +1138,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2141,6 +2145,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings

+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2418,6 +2426,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ 0/1 to disable/enable slab object sanitization (enabled by
+ default).
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
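As a worked illustration of the parameters documented in the hunk above (not part of the patch; the GID value is arbitrary), a boot command line might combine them as:

	pax_softmode=1 pax_sanitize_slab=1 grsec_proc_gid=1001 pax_weakuderef

Per the descriptions above, pax_weakuderef only takes effect on X86-64 processors with PCID support, so pairing it with nopcid would render it inert.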
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index ee78eba..a06b48d 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -277,11 +277,10 @@ Possible BPF extensions are shown in the following table:
 mark skb->mark
 queue skb->queue_mapping
 hatype skb->dev->type
- rxhash skb->hash
+ rxhash skb->rxhash
 cpu raw_smp_processor_id()
 vlan_tci vlan_tx_tag_get(skb)
 vlan_pr vlan_tx_tag_present(skb)
- rand prandom_u32()

 These extensions can also be prefixed with '#'.
 Examples for low-level BPF:
@@ -309,18 +308,6 @@ Examples for low-level BPF:
 ret #-1
 drop: ret #0

-** icmp random packet sampling, 1 in 4
- ldh [12]
- jne #0x800, drop
- ldb [23]
- jneq #1, drop
- # get a random uint32 number
- ld rand
- mod #4
- jneq #1, drop
- ret #-1
- drop: ret #0
-
 ** SECCOMP filter example:

 ld [4] /* offsetof(struct seccomp_data, arch) */
@@ -559,456 +546,6 @@ ffffffffa0069c8f + <x>:
 For BPF JIT developers, bpf_jit_disasm, bpf_asm and bpf_dbg provides a useful
 toolchain for developing and testing the kernel's JIT compiler.

-BPF kernel internals
---------------------
-Internally, for the kernel interpreter, a different instruction set
-format with similar underlying principles from BPF described in previous
-paragraphs is being used. However, the instruction set format is modelled
-closer to the underlying architecture to mimic native instruction sets, so
-that a better performance can be achieved (more details later). This new
-ISA is called 'eBPF' or 'internal BPF' interchangeably. (Note: eBPF which
-originates from [e]xtended BPF is not the same as BPF extensions! While
-eBPF is an ISA, BPF extensions date back to classic BPF's 'overloading'
-of BPF_LD | BPF_{B,H,W} | BPF_ABS instruction.)
-
-It is designed to be JITed with one to one mapping, which can also open up
-the possibility for GCC/LLVM compilers to generate optimized eBPF code through
-an eBPF backend that performs almost as fast as natively compiled code.
-
-The new instruction set was originally designed with the possible goal in
-mind to write programs in "restricted C" and compile into eBPF with a optional
-GCC/LLVM backend, so that it can just-in-time map to modern 64-bit CPUs with
-minimal performance overhead over two steps, that is, C -> eBPF -> native code.
-
-Currently, the new format is being used for running user BPF programs, which
-includes seccomp BPF, classic socket filters, cls_bpf traffic classifier,
-team driver's classifier for its load-balancing mode, netfilter's xt_bpf
-extension, PTP dissector/classifier, and much more. They are all internally
-converted by the kernel into the new instruction set representation and run
-in the eBPF interpreter. For in-kernel handlers, this all works transparently
-by using sk_unattached_filter_create() for setting up the filter, resp.
-sk_unattached_filter_destroy() for destroying it. The macro
-SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
-code to run the filter. 'filter' is a pointer to struct sk_filter that we
-got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
-skb pointer). All constraints and restrictions from sk_chk_filter() apply
-before a conversion to the new layout is being done behind the scenes!
-
-Currently, the classic BPF format is being used for JITing on most of the
-architectures. Only x86-64 performs JIT compilation from eBPF instruction set,
-however, future work will migrate other JIT compilers as well, so that they
-will profit from the very same benefits.
-
-Some core changes of the new internal format:
-
-- Number of registers increase from 2 to 10:
-
- The old format had two registers A and X, and a hidden frame pointer. The
- new layout extends this to be 10 internal registers and a read-only frame
- pointer. Since 64-bit CPUs are passing arguments to functions via registers
- the number of args from eBPF program to in-kernel function is restricted
- to 5 and one register is used to accept return value from an in-kernel
- function. Natively, x86_64 passes first 6 arguments in registers, aarch64/
- sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6 callee saved
- registers, and aarch64/sparcv9/mips64 have 11 or more callee saved registers.
-
- Therefore, eBPF calling convention is defined as:
-
- * R0 - return value from in-kernel function, and exit value for eBPF program
- * R1 - R5 - arguments from eBPF program to in-kernel function
- * R6 - R9 - callee saved registers that in-kernel function will preserve
- * R10 - read-only frame pointer to access stack
-
- Thus, all eBPF registers map one to one to HW registers on x86_64, aarch64,
- etc, and eBPF calling convention maps directly to ABIs used by the kernel on
- 64-bit architectures.
-
- On 32-bit architectures JIT may map programs that use only 32-bit arithmetic
- and may let more complex programs to be interpreted.
-
- R0 - R5 are scratch registers and eBPF program needs spill/fill them if
- necessary across calls. Note that there is only one eBPF program (== one
- eBPF main routine) and it cannot call other eBPF functions, it can only
- call predefined in-kernel functions, though.
-
-- Register width increases from 32-bit to 64-bit:
-
- Still, the semantics of the original 32-bit ALU operations are preserved
- via 32-bit subregisters. All eBPF registers are 64-bit with 32-bit lower
- subregisters that zero-extend into 64-bit if they are being written to.
- That behavior maps directly to x86_64 and arm64 subregister definition, but
- makes other JITs more difficult.
-
- 32-bit architectures run 64-bit internal BPF programs via interpreter.
- Their JITs may convert BPF programs that only use 32-bit subregisters into
- native instruction set and let the rest being interpreted.
-
- Operation is 64-bit, because on 64-bit architectures, pointers are also
- 64-bit wide, and we want to pass 64-bit values in/out of kernel functions,
- so 32-bit eBPF registers would otherwise require to define register-pair
- ABI, thus, there won't be able to use a direct eBPF register to HW register
- mapping and JIT would need to do combine/split/move operations for every
- register in and out of the function, which is complex, bug prone and slow.
- Another reason is the use of atomic 64-bit counters.
-
-- Conditional jt/jf targets replaced with jt/fall-through:
-
- While the original design has constructs such as "if (cond) jump_true;
- else jump_false;", they are being replaced into alternative constructs like
- "if (cond) jump_true; /* else fall-through */".
-
-- Introduces bpf_call insn and register passing convention for zero overhead
- calls from/to other kernel functions:
-
- Before an in-kernel function call, the internal BPF program needs to
- place function arguments into R1 to R5 registers to satisfy calling
- convention, then the interpreter will take them from registers and pass
- to in-kernel function. If R1 - R5 registers are mapped to CPU registers
- that are used for argument passing on given architecture, the JIT compiler
- doesn't need to emit extra moves. Function arguments will be in the correct
- registers and BPF_CALL instruction will be JITed as single 'call' HW
- instruction. This calling convention was picked to cover common call
- situations without performance penalty.
-
- After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
- a return value of the function. Since R6 - R9 are callee saved, their state
- is preserved across the call.
-
- For example, consider three C functions:
-
- u64 f1() { return (*_f2)(1); }
- u64 f2(u64 a) { return f3(a + 1, a); }
- u64 f3(u64 a, u64 b) { return a - b; }
-
- GCC can compile f1, f3 into x86_64:
-
- f1:
- movl $1, %edi
- movq _f2(%rip), %rax
- jmp *%rax
- f3:
- movq %rdi, %rax
- subq %rsi, %rax
- ret
-
- Function f2 in eBPF may look like:
-
- f2:
- bpf_mov R2, R1
- bpf_add R1, 1
- bpf_call f3
- bpf_exit
-
- If f2 is JITed and the pointer stored to '_f2'. The calls f1 -> f2 -> f3 and
- returns will be seamless. Without JIT, __sk_run_filter() interpreter needs to
- be used to call into f2.
-
- For practical reasons all eBPF programs have only one argument 'ctx' which is
- already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
- can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
- are currently not supported, but these restrictions can be lifted if necessary
- in the future.
-
- On 64-bit architectures all register map to HW registers one to one. For
- example, x86_64 JIT compiler can map them as ...
-
- R0 - rax
- R1 - rdi
- R2 - rsi
- R3 - rdx
- R4 - rcx
- R5 - r8
- R6 - rbx
- R7 - r13
- R8 - r14
- R9 - r15
- R10 - rbp
-
- ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
- and rbx, r12 - r15 are callee saved.
-
- Then the following internal BPF pseudo-program:
-
- bpf_mov R6, R1 /* save ctx */
- bpf_mov R2, 2
- bpf_mov R3, 3
- bpf_mov R4, 4
- bpf_mov R5, 5
- bpf_call foo
- bpf_mov R7, R0 /* save foo() return value */
- bpf_mov R1, R6 /* restore ctx for next call */
- bpf_mov R2, 6
- bpf_mov R3, 7
- bpf_mov R4, 8
- bpf_mov R5, 9
- bpf_call bar
- bpf_add R0, R7
- bpf_exit
-
- After JIT to x86_64 may look like:
-
- push %rbp
- mov %rsp,%rbp
- sub $0x228,%rsp
- mov %rbx,-0x228(%rbp)
- mov %r13,-0x220(%rbp)
- mov %rdi,%rbx
- mov $0x2,%esi
- mov $0x3,%edx
- mov $0x4,%ecx
- mov $0x5,%r8d
- callq foo
- mov %rax,%r13
- mov %rbx,%rdi
- mov $0x2,%esi
- mov $0x3,%edx
- mov $0x4,%ecx
- mov $0x5,%r8d
- callq bar
- add %r13,%rax
- mov -0x228(%rbp),%rbx
- mov -0x220(%rbp),%r13
- leaveq
- retq
-
- Which is in this example equivalent in C to:
-
- u64 bpf_filter(u64 ctx)
- {
- return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
- }
-
- In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64
- arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in proper
- registers and place their return value into '%rax' which is R0 in eBPF.
- Prologue and epilogue are emitted by JIT and are implicit in the
- interpreter. R0-R5 are scratch registers, so eBPF program needs to preserve
- them across the calls as defined by calling convention.
-
- For example the following program is invalid:
-
- bpf_mov R1, 1
- bpf_call foo
- bpf_mov R0, R1
- bpf_exit
-
- After the call the registers R1-R5 contain junk values and cannot be read.
- In the future an eBPF verifier can be used to validate internal BPF programs.
-
-Also in the new design, eBPF is limited to 4096 insns, which means that any
-program will terminate quickly and will only call a fixed number of kernel
-functions. Original BPF and the new format are two operand instructions,
-which helps to do one-to-one mapping between eBPF insn and x86 insn during JIT.
-
-The input context pointer for invoking the interpreter function is generic,
-its content is defined by a specific use case. For seccomp register R1 points
-to seccomp_data, for converted BPF filters R1 points to a skb.
-
-A program, that is translated internally consists of the following elements:
-
- op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32
-
-So far 87 internal BPF instructions were implemented. 8-bit 'op' opcode field
-has room for new instructions. Some of them may use 16/24/32 byte encoding. New
-instructions must be multiple of 8 bytes to preserve backward compatibility.
-
-Internal BPF is a general purpose RISC instruction set. Not every register and
-every instruction are used during translation from original BPF to new format.
-For example, socket filters are not using 'exclusive add' instruction, but
-tracing filters may do to maintain counters of events, for example. Register R9
-is not used by socket filters either, but more complex filters may be running
-out of registers and would have to resort to spill/fill to stack.
-
-Internal BPF can used as generic assembler for last step performance
-optimizations, socket filters and seccomp are using it as assembler. Tracing
-filters may use it as assembler to generate code from kernel. In kernel usage
-may not be bounded by security considerations, since generated internal BPF code
-may be optimizing internal code path and not being exposed to the user space.
-Safety of internal BPF can come from a verifier (TBD). In such use cases as
-described, it may be used as safe instruction set.
-
-Just like the original BPF, the new format runs within a controlled environment,
-is deterministic and the kernel can easily prove that. The safety of the program
-can be determined in two steps: first step does depth-first-search to disallow
-loops and other CFG validation; second step starts from the first insn and
-descends all possible paths. It simulates execution of every insn and observes
-the state change of registers and stack.
-
-eBPF opcode encoding
---------------------
-
-eBPF is reusing most of the opcode encoding from classic to simplify conversion
-of classic BPF to eBPF. For arithmetic and jump instructions the 8-bit 'code'
-field is divided into three parts:
-
- +----------------+--------+--------------------+
- | 4 bits | 1 bit | 3 bits |
- | operation code | source | instruction class |
- +----------------+--------+--------------------+
- (MSB) (LSB)
-
-Three LSB bits store instruction class which is one of:
-
- Classic BPF classes: eBPF classes:
-
- BPF_LD 0x00 BPF_LD 0x00
- BPF_LDX 0x01 BPF_LDX 0x01
- BPF_ST 0x02 BPF_ST 0x02
- BPF_STX 0x03 BPF_STX 0x03
- BPF_ALU 0x04 BPF_ALU 0x04
- BPF_JMP 0x05 BPF_JMP 0x05
- BPF_RET 0x06 [ class 6 unused, for future if needed ]
- BPF_MISC 0x07 BPF_ALU64 0x07
-
-When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ...
-
- BPF_K 0x00
- BPF_X 0x08
-
- * in classic BPF, this means:
-
- BPF_SRC(code) == BPF_X - use register X as source operand
- BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
-
- * in eBPF, this means:
-
- BPF_SRC(code) == BPF_X - use 'src_reg' register as source operand
- BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
-
-... and four MSB bits store operation code.
-
-If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
-
- BPF_ADD 0x00
- BPF_SUB 0x10
- BPF_MUL 0x20
- BPF_DIV 0x30
- BPF_OR 0x40
- BPF_AND 0x50
- BPF_LSH 0x60
- BPF_RSH 0x70
- BPF_NEG 0x80
- BPF_MOD 0x90
- BPF_XOR 0xa0
- BPF_MOV 0xb0 /* eBPF only: mov reg to reg */
- BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */
- BPF_END 0xd0 /* eBPF only: endianness conversion */
-
-If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
-
- BPF_JA 0x00
- BPF_JEQ 0x10
- BPF_JGT 0x20
- BPF_JGE 0x30
- BPF_JSET 0x40
- BPF_JNE 0x50 /* eBPF only: jump != */
- BPF_JSGT 0x60 /* eBPF only: signed '>' */
- BPF_JSGE 0x70 /* eBPF only: signed '>=' */
- BPF_CALL 0x80 /* eBPF only: function call */
- BPF_EXIT 0x90 /* eBPF only: function return */
-
-So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
-and eBPF. There are only two registers in classic BPF, so it means A += X.
-In eBPF it means dst_reg = (u32) dst_reg + (u32) src_reg; similarly,
-BPF_XOR | BPF_K | BPF_ALU means A ^= imm32 in classic BPF and analogous
-src_reg = (u32) src_reg ^ (u32) imm32 in eBPF.
-
-Classic BPF is using BPF_MISC class to represent A = X and X = A moves.
-eBPF is using BPF_MOV | BPF_X | BPF_ALU code instead. Since there are no
-BPF_MISC operations in eBPF, the class 7 is used as BPF_ALU64 to mean
-exactly the same operations as BPF_ALU, but with 64-bit wide operands
-instead. So BPF_ADD | BPF_X | BPF_ALU64 means 64-bit addition, i.e.:
-dst_reg = dst_reg + src_reg
-
-Classic BPF wastes the whole BPF_RET class to represent a single 'ret'
-operation. Classic BPF_RET | BPF_K means copy imm32 into return register
-and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT
-in eBPF means function exit only. The eBPF program needs to store return
-value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently
-unused and reserved for future use.
-
-For load and store instructions the 8-bit 'code' field is divided as:
-
- +--------+--------+-------------------+
- | 3 bits | 2 bits | 3 bits |
- | mode | size | instruction class |
- +--------+--------+-------------------+
- (MSB) (LSB)
-
-Size modifier is one of ...
-
- BPF_W 0x00 /* word */
- BPF_H 0x08 /* half word */
- BPF_B 0x10 /* byte */
- BPF_DW 0x18 /* eBPF only, double word */
-
-... which encodes size of load/store operation:
-
- B - 1 byte
- H - 2 byte
- W - 4 byte
- DW - 8 byte (eBPF only)
-
-Mode modifier is one of:
-
- BPF_IMM 0x00 /* classic BPF only, reserved in eBPF */
- BPF_ABS 0x20
- BPF_IND 0x40
- BPF_MEM 0x60
- BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */
- BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */
- BPF_XADD 0xc0 /* eBPF only, exclusive add */
-
-eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
-(BPF_IND | <size> | BPF_LD) which are used to access packet data.
-
-They had to be carried over from classic to have strong performance of
-socket filters running in eBPF interpreter. These instructions can only
-be used when interpreter context is a pointer to 'struct sk_buff' and
-have seven implicit operands. Register R6 is an implicit input that must
-contain pointer to sk_buff. Register R0 is an implicit output which contains
-the data fetched from the packet. Registers R1-R5 are scratch registers
-and must not be used to store the data across BPF_ABS | BPF_LD or
-BPF_IND | BPF_LD instructions.
-
-These instructions have implicit program exit condition as well. When
-eBPF program is trying to access the data beyond the packet boundary,
-the interpreter will abort the execution of the program. JIT compilers
-therefore must preserve this property. src_reg and imm32 fields are
-explicit inputs to these instructions.
-
-For example:
-
- BPF_IND | BPF_W | BPF_LD means:
-
- R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32))
- and R1 - R5 were scratched.
-
-Unlike classic BPF instruction set, eBPF has generic load/store operations:
-
-BPF_MEM | <size> | BPF_STX: *(size *) (dst_reg + off) = src_reg
-BPF_MEM | <size> | BPF_ST: *(size *) (dst_reg + off) = imm32
-BPF_MEM | <size> | BPF_LDX: dst_reg = *(size *) (src_reg + off)
-BPF_XADD | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
-BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
-
-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
-2 byte atomic increments are not supported.
-
-Testing
--------
-
-Next to the BPF toolchain, the kernel also ships a test module that contains
-various test cases for classic and internal BPF that can be executed against
-the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and
-enabled via Kconfig:
-
- CONFIG_TEST_BPF=m
-
-After the module has been built and installed, the test suite can be executed
-via insmod or modprobe against 'test_bpf' module. Results of the test cases
-including timings in nsec can be found in the kernel log (dmesg).
-
 Misc
 ----

@@ -1024,4 +561,3 @@ the underlying architecture.

 Jay Schulist <jschlst@samba.org>
 Daniel Borkmann <dborkman@redhat.com>
-Alexei Starovoitov <ast@plumgrid.com>
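The documentation this hunk removes described, among other things, the 8-bit eBPF opcode layout: four bits of operation code, one source bit, three bits of instruction class. A minimal C sketch of that encoding, using only constants quoted in the removed text (the BPF_CLASS/BPF_SRC/BPF_OP field masks mirror the kernel's definitions in linux/filter.h):

	#include <stdio.h>

	#define BPF_ALU64 0x07 /* instruction class: 64-bit ALU (eBPF only) */
	#define BPF_X     0x08 /* source bit: operand is a register */
	#define BPF_ADD   0x00 /* operation code */

	#define BPF_CLASS(code) ((code) & 0x07)
	#define BPF_SRC(code)   ((code) & 0x08)
	#define BPF_OP(code)    ((code) & 0xf0)

	int main(void)
	{
		unsigned char code = BPF_ADD | BPF_X | BPF_ALU64;

		/* prints "class=0x7 src=0x8 op=0": dst_reg += src_reg, 64-bit wide */
		printf("class=%#x src=%#x op=%#x\n",
		       BPF_CLASS(code), BPF_SRC(code), BPF_OP(code));
		return 0;
	}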
diff --git a/Makefile b/Makefile
index 87663a2..906dc62 100644
--- a/Makefile
+++ b/Makefile
@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds

 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
@@ -449,8 +450,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -621,6 +622,75 @@ else
 KBUILD_CFLAGS += -O2
 endif

+# Tell gcc to never replace conditional load with a non-conditional one
+KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+
+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -839,7 +909,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -888,6 +958,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -897,7 +969,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 define filechk_kernel.release
@@ -940,10 +1012,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \

 archprepare: archheaders archscripts prepare1 scripts_basic

+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -1051,6 +1126,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -1066,7 +1143,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1132,7 +1209,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer include/linux/version.h
+ signing_key.x509.signer include/linux/version.h \
+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
+ tools/gcc/randomize_layout_seed.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1171,7 +1251,7 @@ distclean: mrproper
 @find $(srctree) $(RCS_FIND_IGNORE) \
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
 -type f -print | xargs rm -f


@@ -1332,6 +1412,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1471,17 +1553,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1491,11 +1577,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
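The Makefile changes above wire a mandatory gcc-plugins step ahead of scripts_basic, prepare, modules and the per-object %.s/%.o/%.ko rules, so every kernel object is compiled with $(GCC_PLUGINS_CFLAGS). The $(error ...) message itself names the escape hatch for toolchains without plugin headers; for example:

	make DISABLE_PAX_PLUGINS=y

which, as the accompanying warning states, leaves PAX_SIZE_OVERFLOW inactive and weakens PAX_MEMORY_STACKLEAK, constification and PAX_LATENT_ENTROPY.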
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index ed60a1e..47f1a55 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -292,4 +292,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #endif /* _ALPHA_ATOMIC_H */
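On alpha the *_unchecked accessors are plain aliases: PAX_REFCOUNT overflow trapping is not implemented here, so checked and unchecked counters behave identically (compare the ARM atomic.h hunks further down, where the checked variants gain adds/bvc/bkpt overflow traps). A rough sketch of the intended split, in hypothetical C rather than the kernel's implementation:

	/* Checked counters (refcounts) should trap on signed overflow;
	 * unchecked counters (e.g. statistics) keep wrapping arithmetic. */
	#include <stdlib.h>

	typedef struct { long counter; } atomic64_t;
	typedef struct { long counter; } atomic64_unchecked_t;

	static void atomic64_add(long i, atomic64_t *v)
	{
		if (__builtin_add_overflow(v->counter, i, &v->counter))
			abort(); /* stand-in for the trapping insn (bkpt 0xf103 on ARM) */
	}

	static void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
	{
		v->counter += i; /* no check, same as the plain op on alpha */
	}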
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
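Deriving L1_CACHE_BYTES from the shift keeps the two constants from drifting apart; _AC(1,UL) (from the newly included linux/const.h) is simply 1UL written so the same expression also works in assembly sources. Expanded for the two configurations:

	EV6/generic: L1_CACHE_SHIFT = 6 -> L1_CACHE_BYTES = 1UL << 6 = 64
	EV4/EV5:     L1_CACHE_SHIFT = 5 -> L1_CACHE_BYTES = 1UL << 5 = 32

matching the hard-coded 64 and 32 that the hunk removes.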
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
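The PAX_DELTA_* values are randomization bit counts for ASLR. Assuming PaX applies them as page-aligned offsets (a random value masked to that many bits and shifted left by PAGE_SHIFT, which is 13 on alpha with its 8 KB pages), a full 64-bit process would get roughly:

	PAX_DELTA_MMAP_LEN  = 28 -> 2^(28+13) bytes, about 2 TB of mmap base randomization
	PAX_DELTA_STACK_LEN = 19 -> 2^(19+13) bytes, about 4 GB of stack randomization

with the much smaller 14-bit values reserved for ADDR_LIMIT_32BIT personalities.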
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a0..b4fa3e7 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index d8f9b7e..f6222fa 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
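Alpha expresses "no execute" through its fault-on-execute PTE bit, so the new protections simply add _PAGE_FOE to the existing fault bits. Under PAX_PAGEEXEC a copy-on-write data page, for instance, ends up as

	PAGE_COPY_NOEXEC = _PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE

i.e. readable, faulting on write (for COW) and faulting on instruction fetch; the fault.c hunk below turns that fetch fault into the PaX kill/PLT-emulation path. Without PAGEEXEC the _NOEXEC names fall back to the ordinary protections.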
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 1402fcc..0b1abd2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a0..b304fb4 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 290f02ee..a639059 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1787,7 +1787,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
@@ -2051,6 +2051,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
 depends on (!SMP || PM_SLEEP_SMP)
+ depends on !GRKERNSEC_KMEM
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 3040359..89b3dfc 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)

 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -63,6 +111,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
 prefetchw(&v->counter);

 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -84,6 +169,36 @@ static inline void atomic_sub(int i, atomic_t *v)

 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -103,11 +218,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 prefetchw(&v->counter);

 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -152,12 +281,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 __asm__ __volatile__ ("@ atomic_add_unless\n"
 "1: ldrex %0, [%4]\n"
 " teq %0, %5\n"
-" beq 2f\n"
-" add %1, %0, %6\n"
+" beq 4f\n"
+" adds %1, %0, %6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %2, %1, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -168,6 +309,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
@@ -186,7 +349,17 @@ static inline int atomic_add_return(int i, atomic_t *v)

 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}

 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -201,6 +374,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}

 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -216,6 +393,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 int c, old;
@@ -229,13 +411,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -246,6 +448,14 @@ typedef struct {
 long long counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -262,6 +472,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -270,6 +493,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -284,6 +516,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
1758+}
1759+
1760 static inline void atomic64_set(atomic64_t *v, long long i)
1761 {
1762 long long tmp;
1763@@ -298,6 +543,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1764 : "r" (&v->counter), "r" (i)
1765 : "cc");
1766 }
1767+
1768+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1769+{
1770+ long long tmp;
1771+
1772+ prefetchw(&v->counter);
1773+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1774+"1: ldrexd %0, %H0, [%2]\n"
1775+" strexd %0, %3, %H3, [%2]\n"
1776+" teq %0, #0\n"
1777+" bne 1b"
1778+ : "=&r" (tmp), "=Qo" (v->counter)
1779+ : "r" (&v->counter), "r" (i)
1780+ : "cc");
1781+}
1782 #endif
1783
1784 static inline void atomic64_add(long long i, atomic64_t *v)
1785@@ -309,6 +569,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1786 __asm__ __volatile__("@ atomic64_add\n"
1787 "1: ldrexd %0, %H0, [%3]\n"
1788 " adds %Q0, %Q0, %Q4\n"
1789+" adcs %R0, %R0, %R4\n"
1790+
1791+#ifdef CONFIG_PAX_REFCOUNT
1792+" bvc 3f\n"
1793+"2: bkpt 0xf103\n"
1794+"3:\n"
1795+#endif
1796+
1797+" strexd %1, %0, %H0, [%3]\n"
1798+" teq %1, #0\n"
1799+" bne 1b"
1800+
1801+#ifdef CONFIG_PAX_REFCOUNT
1802+"\n4:\n"
1803+ _ASM_EXTABLE(2b, 4b)
1804+#endif
1805+
1806+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1807+ : "r" (&v->counter), "r" (i)
1808+ : "cc");
1809+}
1810+
1811+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1812+{
1813+ long long result;
1814+ unsigned long tmp;
1815+
1816+ prefetchw(&v->counter);
1817+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1818+"1: ldrexd %0, %H0, [%3]\n"
1819+" adds %Q0, %Q0, %Q4\n"
1820 " adc %R0, %R0, %R4\n"
1821 " strexd %1, %0, %H0, [%3]\n"
1822 " teq %1, #0\n"
1823@@ -329,6 +620,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1824 __asm__ __volatile__("@ atomic64_add_return\n"
1825 "1: ldrexd %0, %H0, [%3]\n"
1826 " adds %Q0, %Q0, %Q4\n"
1827+" adcs %R0, %R0, %R4\n"
1828+
1829+#ifdef CONFIG_PAX_REFCOUNT
1830+" bvc 3f\n"
1831+" mov %0, %1\n"
1832+" mov %H0, %H1\n"
1833+"2: bkpt 0xf103\n"
1834+"3:\n"
1835+#endif
1836+
1837+" strexd %1, %0, %H0, [%3]\n"
1838+" teq %1, #0\n"
1839+" bne 1b"
1840+
1841+#ifdef CONFIG_PAX_REFCOUNT
1842+"\n4:\n"
1843+ _ASM_EXTABLE(2b, 4b)
1844+#endif
1845+
1846+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1847+ : "r" (&v->counter), "r" (i)
1848+ : "cc");
1849+
1850+ smp_mb();
1851+
1852+ return result;
1853+}
1854+
1855+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1856+{
1857+ long long result;
1858+ unsigned long tmp;
1859+
1860+ smp_mb();
1861+
1862+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1863+"1: ldrexd %0, %H0, [%3]\n"
1864+" adds %Q0, %Q0, %Q4\n"
1865 " adc %R0, %R0, %R4\n"
1866 " strexd %1, %0, %H0, [%3]\n"
1867 " teq %1, #0\n"
1868@@ -351,6 +680,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1869 __asm__ __volatile__("@ atomic64_sub\n"
1870 "1: ldrexd %0, %H0, [%3]\n"
1871 " subs %Q0, %Q0, %Q4\n"
1872+" sbcs %R0, %R0, %R4\n"
1873+
1874+#ifdef CONFIG_PAX_REFCOUNT
1875+" bvc 3f\n"
1876+"2: bkpt 0xf103\n"
1877+"3:\n"
1878+#endif
1879+
1880+" strexd %1, %0, %H0, [%3]\n"
1881+" teq %1, #0\n"
1882+" bne 1b"
1883+
1884+#ifdef CONFIG_PAX_REFCOUNT
1885+"\n4:\n"
1886+ _ASM_EXTABLE(2b, 4b)
1887+#endif
1888+
1889+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1890+ : "r" (&v->counter), "r" (i)
1891+ : "cc");
1892+}
1893+
1894+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1895+{
1896+ long long result;
1897+ unsigned long tmp;
1898+
1899+ prefetchw(&v->counter);
1900+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1901+"1: ldrexd %0, %H0, [%3]\n"
1902+" subs %Q0, %Q0, %Q4\n"
1903 " sbc %R0, %R0, %R4\n"
1904 " strexd %1, %0, %H0, [%3]\n"
1905 " teq %1, #0\n"
1906@@ -371,16 +731,29 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1907 __asm__ __volatile__("@ atomic64_sub_return\n"
1908 "1: ldrexd %0, %H0, [%3]\n"
1909 " subs %Q0, %Q0, %Q4\n"
1910-" sbc %R0, %R0, %R4\n"
1911+" sbcs %R0, %R0, %R4\n"
1912+
1913+#ifdef CONFIG_PAX_REFCOUNT
1914+" bvc 3f\n"
1915+" mov %0, %1\n"
1916+" mov %H0, %H1\n"
1917+"2: bkpt 0xf103\n"
1918+"3:\n"
1919+#endif
1920+
1921 " strexd %1, %0, %H0, [%3]\n"
1922 " teq %1, #0\n"
1923 " bne 1b"
1924+
1925+#ifdef CONFIG_PAX_REFCOUNT
1926+"\n4:\n"
1927+ _ASM_EXTABLE(2b, 4b)
1928+#endif
1929+
1930 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1931 : "r" (&v->counter), "r" (i)
1932 : "cc");
1933
1934- smp_mb();
1935-
1936 return result;
1937 }
1938
1939@@ -410,6 +783,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1940 return oldval;
1941 }
1942
1943+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1944+ long long new)
1945+{
1946+ long long oldval;
1947+ unsigned long res;
1948+
1949+ smp_mb();
1950+
1951+ do {
1952+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1953+ "ldrexd %1, %H1, [%3]\n"
1954+ "mov %0, #0\n"
1955+ "teq %1, %4\n"
1956+ "teqeq %H1, %H4\n"
1957+ "strexdeq %0, %5, %H5, [%3]"
1958+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1959+ : "r" (&ptr->counter), "r" (old), "r" (new)
1960+ : "cc");
1961+ } while (res);
1962+
1963+ smp_mb();
1964+
1965+ return oldval;
1966+}
1967+
1968 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1969 {
1970 long long result;
1971@@ -435,21 +833,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1972 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1973 {
1974 long long result;
1975- unsigned long tmp;
1976+ u64 tmp;
1977
1978 smp_mb();
1979 prefetchw(&v->counter);
1980
1981 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1982-"1: ldrexd %0, %H0, [%3]\n"
1983-" subs %Q0, %Q0, #1\n"
1984-" sbc %R0, %R0, #0\n"
1985+"1: ldrexd %1, %H1, [%3]\n"
1986+" subs %Q0, %Q1, #1\n"
1987+" sbcs %R0, %R1, #0\n"
1988+
1989+#ifdef CONFIG_PAX_REFCOUNT
1990+" bvc 3f\n"
1991+" mov %Q0, %Q1\n"
1992+" mov %R0, %R1\n"
1993+"2: bkpt 0xf103\n"
1994+"3:\n"
1995+#endif
1996+
1997 " teq %R0, #0\n"
1998-" bmi 2f\n"
1999+" bmi 4f\n"
2000 " strexd %1, %0, %H0, [%3]\n"
2001 " teq %1, #0\n"
2002 " bne 1b\n"
2003-"2:"
2004+"4:\n"
2005+
2006+#ifdef CONFIG_PAX_REFCOUNT
2007+ _ASM_EXTABLE(2b, 4b)
2008+#endif
2009+
2010 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
2011 : "r" (&v->counter)
2012 : "cc");
2013@@ -473,13 +885,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
2014 " teq %0, %5\n"
2015 " teqeq %H0, %H5\n"
2016 " moveq %1, #0\n"
2017-" beq 2f\n"
2018+" beq 4f\n"
2019 " adds %Q0, %Q0, %Q6\n"
2020-" adc %R0, %R0, %R6\n"
2021+" adcs %R0, %R0, %R6\n"
2022+
2023+#ifdef CONFIG_PAX_REFCOUNT
2024+" bvc 3f\n"
2025+"2: bkpt 0xf103\n"
2026+"3:\n"
2027+#endif
2028+
2029 " strexd %2, %0, %H0, [%4]\n"
2030 " teq %2, #0\n"
2031 " bne 1b\n"
2032-"2:"
2033+"4:\n"
2034+
2035+#ifdef CONFIG_PAX_REFCOUNT
2036+ _ASM_EXTABLE(2b, 4b)
2037+#endif
2038+
2039 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
2040 : "r" (&v->counter), "r" (u), "r" (a)
2041 : "cc");
2042@@ -492,10 +916,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
2043
2044 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
2045 #define atomic64_inc(v) atomic64_add(1LL, (v))
2046+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
2047 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
2048+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
2049 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2050 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
2051 #define atomic64_dec(v) atomic64_sub(1LL, (v))
2052+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
2053 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
2054 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
2055 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
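The atomic.h hunks above are the core of PAX_REFCOUNT on ARM: every add/sub becomes its flag-setting form (adds/subs/adcs/sbcs), "bvc" skips a "bkpt 0xf103" when the V flag is clear, and on overflow the breakpoint raises an exception whose _ASM_EXTABLE(2b, 4b) entry resumes execution past the strex, so the wrapped value is never stored (the *_return variants also copy the old value back first via the mov lines). Counters that are allowed to wrap use the new atomic_unchecked_t / *_unchecked variants, which keep the plain non-flag-setting instructions. A minimal portable sketch of the same idea, using GCC's __builtin_add_overflow instead of inline assembly (refcount_add_checked is a hypothetical name):

#include <stdio.h>
#include <limits.h>

/*
 * Sketch of the PAX_REFCOUNT idea: detect signed overflow on an increment
 * and refuse to store the wrapped value. The kernel does this with
 * adds/bvc/bkpt in inline assembly; this uses __builtin_add_overflow.
 */
static int refcount_add_checked(int *counter, int i)
{
        int sum;

        if (__builtin_add_overflow(*counter, i, &sum)) {
                /* kernel equivalent: bkpt 0xf103 -> exception -> report */
                fprintf(stderr, "refcount overflow detected\n");
                return *counter;        /* counter saturates at the old value */
        }
        *counter = sum;
        return sum;
}

int main(void)
{
        int c = INT_MAX - 1;

        refcount_add_checked(&c, 1);    /* ok: reaches INT_MAX */
        refcount_add_checked(&c, 1);    /* would wrap: rejected */
        printf("counter = %d\n", c);    /* still INT_MAX */
        return 0;
}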
2056diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
2057index c6a3e73..35cca85 100644
2058--- a/arch/arm/include/asm/barrier.h
2059+++ b/arch/arm/include/asm/barrier.h
2060@@ -63,7 +63,7 @@
2061 do { \
2062 compiletime_assert_atomic_type(*p); \
2063 smp_mb(); \
2064- ACCESS_ONCE(*p) = (v); \
2065+ ACCESS_ONCE_RW(*p) = (v); \
2066 } while (0)
2067
2068 #define smp_load_acquire(p) \
2069diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
2070index 75fe66b..ba3dee4 100644
2071--- a/arch/arm/include/asm/cache.h
2072+++ b/arch/arm/include/asm/cache.h
2073@@ -4,8 +4,10 @@
2074 #ifndef __ASMARM_CACHE_H
2075 #define __ASMARM_CACHE_H
2076
2077+#include <linux/const.h>
2078+
2079 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
2080-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2081+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2082
2083 /*
2084 * Memory returned by kmalloc() may be used for DMA, so we must make
2085@@ -24,5 +26,6 @@
2086 #endif
2087
2088 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
2089+#define __read_only __attribute__ ((__section__(".data..read_only")))
2090
2091 #endif
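The cache.h hunk introduces __read_only, which drops a variable into the .data..read_only section so the KERNEXEC page-table setup can map it without write permission (writers then go through pax_open_kernel()); the _AC(1,UL) change makes L1_CACHE_BYTES usable from assembly while staying unsigned long in C. A compile-only sketch of the attribute; in a plain userspace build the custom section stays writable, so the fault only materializes once the kernel actually maps the section read-only:

/*
 * Minimal illustration of the __read_only annotation added above. The
 * section name is what the patch uses; the variable and accessor are
 * hypothetical.
 */
#define __read_only __attribute__((__section__(".data..read_only")))

static int ro_value __read_only = 42;

int get_ro_value(void)
{
        return ro_value;        /* reads are always fine */
}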
2092diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
2093index fd43f7f..a817f5a 100644
2094--- a/arch/arm/include/asm/cacheflush.h
2095+++ b/arch/arm/include/asm/cacheflush.h
2096@@ -116,7 +116,7 @@ struct cpu_cache_fns {
2097 void (*dma_unmap_area)(const void *, size_t, int);
2098
2099 void (*dma_flush_range)(const void *, const void *);
2100-};
2101+} __no_const;
2102
2103 /*
2104 * Select the calling method
2105diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
2106index 5233151..87a71fa 100644
2107--- a/arch/arm/include/asm/checksum.h
2108+++ b/arch/arm/include/asm/checksum.h
2109@@ -37,7 +37,19 @@ __wsum
2110 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
2111
2112 __wsum
2113-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
2114+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
2115+
2116+static inline __wsum
2117+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
2118+{
2119+ __wsum ret;
2120+ pax_open_userland();
2121+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
2122+ pax_close_userland();
2123+ return ret;
2124+}
2125+
2126+
2127
2128 /*
2129 * Fold a partial checksum without adding pseudo headers
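The checksum.h hunk is the first instance of a wrapper pattern this patch applies to every user-memory primitive (futexes, get_user/put_user, and the copy routines later on): the raw implementation gains a leading underscore, and a static inline under the old name brackets it with pax_open_userland()/pax_close_userland(), so the user domain is accessible only for the duration of the access. A runnable model of just the pattern; the flag variable is a hypothetical stand-in for the real DACR flip:

#include <stdio.h>

static int userland_open;               /* stand-in for the DACR state */

static void pax_open_userland(void)  { userland_open = 1; }
static void pax_close_userland(void) { userland_open = 0; }

/* formerly the exported name; direct callers would now trip the closed domain */
static int __user_op(int x)
{
        return userland_open ? x * 2 : -1;
}

static inline int user_op(int x)        /* the name callers keep using */
{
        int ret;

        pax_open_userland();
        ret = __user_op(x);
        pax_close_userland();
        return ret;
}

int main(void)
{
        printf("wrapped: %d\n", user_op(21));
        printf("raw, domain closed: %d\n", __user_op(21));
        return 0;
}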
2130diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
2131index abb2c37..96db950 100644
2132--- a/arch/arm/include/asm/cmpxchg.h
2133+++ b/arch/arm/include/asm/cmpxchg.h
2134@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
2135
2136 #define xchg(ptr,x) \
2137 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
2138+#define xchg_unchecked(ptr,x) \
2139+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
2140
2141 #include <asm-generic/cmpxchg-local.h>
2142
2143diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
2144index 6ddbe44..b5e38b1 100644
2145--- a/arch/arm/include/asm/domain.h
2146+++ b/arch/arm/include/asm/domain.h
2147@@ -48,18 +48,37 @@
2148 * Domain types
2149 */
2150 #define DOMAIN_NOACCESS 0
2151-#define DOMAIN_CLIENT 1
2152 #ifdef CONFIG_CPU_USE_DOMAINS
2153+#define DOMAIN_USERCLIENT 1
2154+#define DOMAIN_KERNELCLIENT 1
2155 #define DOMAIN_MANAGER 3
2156+#define DOMAIN_VECTORS DOMAIN_USER
2157 #else
2158+
2159+#ifdef CONFIG_PAX_KERNEXEC
2160 #define DOMAIN_MANAGER 1
2161+#define DOMAIN_KERNEXEC 3
2162+#else
2163+#define DOMAIN_MANAGER 1
2164+#endif
2165+
2166+#ifdef CONFIG_PAX_MEMORY_UDEREF
2167+#define DOMAIN_USERCLIENT 0
2168+#define DOMAIN_UDEREF 1
2169+#define DOMAIN_VECTORS DOMAIN_KERNEL
2170+#else
2171+#define DOMAIN_USERCLIENT 1
2172+#define DOMAIN_VECTORS DOMAIN_USER
2173+#endif
2174+#define DOMAIN_KERNELCLIENT 1
2175+
2176 #endif
2177
2178 #define domain_val(dom,type) ((type) << (2*(dom)))
2179
2180 #ifndef __ASSEMBLY__
2181
2182-#ifdef CONFIG_CPU_USE_DOMAINS
2183+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2184 static inline void set_domain(unsigned val)
2185 {
2186 asm volatile(
2187@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
2188 isb();
2189 }
2190
2191-#define modify_domain(dom,type) \
2192- do { \
2193- struct thread_info *thread = current_thread_info(); \
2194- unsigned int domain = thread->cpu_domain; \
2195- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
2196- thread->cpu_domain = domain | domain_val(dom, type); \
2197- set_domain(thread->cpu_domain); \
2198- } while (0)
2199-
2200+extern void modify_domain(unsigned int dom, unsigned int type);
2201 #else
2202 static inline void set_domain(unsigned val) { }
2203 static inline void modify_domain(unsigned dom, unsigned type) { }
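The domain.h rework carries UDEREF's central trick: the DACR packs sixteen domains at two bits each (domain_val(dom, type) is just type << (2*dom)), where 0 means no access, 1 means client (page permissions enforced), and 3 means manager (permissions bypassed). With DOMAIN_USERCLIENT redefined to 0, userland is unreachable from kernel mode by default and is switched to client (DOMAIN_UDEREF, 1) only inside an open/close window. A small runnable sketch of the packing; the domain numbers follow the usual ARM assignment (0 = kernel, 1 = user, 2 = IO) but are restated here for illustration:

#include <stdio.h>

#define DOMAIN_KERNEL   0
#define DOMAIN_USER     1
#define DOMAIN_IO       2

#define DOMAIN_NOACCESS 0
#define DOMAIN_CLIENT   1
#define DOMAIN_MANAGER  3

#define domain_val(dom, type)   ((type) << (2 * (dom)))

int main(void)
{
        /* UDEREF resting state: user domain closed, kernel and IO client */
        unsigned dacr = domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) |
                        domain_val(DOMAIN_USER, DOMAIN_NOACCESS) |
                        domain_val(DOMAIN_IO, DOMAIN_CLIENT);

        /* pax_open_userland(): flip only the DOMAIN_USER field to client */
        dacr &= ~domain_val(DOMAIN_USER, 3);
        dacr |= domain_val(DOMAIN_USER, DOMAIN_CLIENT);

        printf("DACR = 0x%08x\n", dacr);
        return 0;
}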
2204diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
2205index f4b46d3..abc9b2b 100644
2206--- a/arch/arm/include/asm/elf.h
2207+++ b/arch/arm/include/asm/elf.h
2208@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
2209 the loader. We need to make sure that it is out of the way of the program
2210 that it will "exec", and that there is sufficient room for the brk. */
2211
2212-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2213+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2214+
2215+#ifdef CONFIG_PAX_ASLR
2216+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
2217+
2218+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
2219+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
2220+#endif
2221
2222 /* When the program starts, a1 contains a pointer to a function to be
2223 registered with atexit, as per the SVR4 ABI. A value of 0 means we
2224@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
2225 extern void elf_set_personality(const struct elf32_hdr *);
2226 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
2227
2228-struct mm_struct;
2229-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2230-#define arch_randomize_brk arch_randomize_brk
2231-
2232 #ifdef CONFIG_MMU
2233 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2234 struct linux_binprm;
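Under PAX_ASLR this hunk lowers the ET_DYN load base to 0x8000 and defines the randomization widths: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts of page-granular entropy, 16 bits for PER_LINUX_32BIT personalities and 10 otherwise. Quick arithmetic on what those widths mean with 4 KiB pages (sketch only; the actual placement logic lives in the PaX mmap code):

#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096;
        int bits[] = { 16, 10 };

        /* n bits of page-granular delta randomize over page_size << n bytes */
        for (int i = 0; i < 2; i++)
                printf("%2d bits -> %3lu MiB window\n", bits[i],
                       (page_size << bits[i]) >> 20);
        return 0;
}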
2235diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
2236index de53547..52b9a28 100644
2237--- a/arch/arm/include/asm/fncpy.h
2238+++ b/arch/arm/include/asm/fncpy.h
2239@@ -81,7 +81,9 @@
2240 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
2241 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
2242 \
2243+ pax_open_kernel(); \
2244 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
2245+ pax_close_kernel(); \
2246 flush_icache_range((unsigned long)(dest_buf), \
2247 (unsigned long)(dest_buf) + (size)); \
2248 \
2249diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
2250index 53e69da..3fdc896 100644
2251--- a/arch/arm/include/asm/futex.h
2252+++ b/arch/arm/include/asm/futex.h
2253@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2254 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2255 return -EFAULT;
2256
2257+ pax_open_userland();
2258+
2259 smp_mb();
2260 /* Prefetching cannot fault */
2261 prefetchw(uaddr);
2262@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2263 : "cc", "memory");
2264 smp_mb();
2265
2266+ pax_close_userland();
2267+
2268 *uval = val;
2269 return ret;
2270 }
2271@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2272 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2273 return -EFAULT;
2274
2275+ pax_open_userland();
2276+
2277 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
2278 "1: " TUSER(ldr) " %1, [%4]\n"
2279 " teq %1, %2\n"
2280@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2281 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
2282 : "cc", "memory");
2283
2284+ pax_close_userland();
2285+
2286 *uval = val;
2287 return ret;
2288 }
2289@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
2290 return -EFAULT;
2291
2292 pagefault_disable(); /* implies preempt_disable() */
2293+ pax_open_userland();
2294
2295 switch (op) {
2296 case FUTEX_OP_SET:
2297@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
2298 ret = -ENOSYS;
2299 }
2300
2301+ pax_close_userland();
2302 pagefault_enable(); /* subsumes preempt_enable() */
2303
2304 if (!ret) {
2305diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
2306index 83eb2f7..ed77159 100644
2307--- a/arch/arm/include/asm/kmap_types.h
2308+++ b/arch/arm/include/asm/kmap_types.h
2309@@ -4,6 +4,6 @@
2310 /*
2311 * This is the "bare minimum". AIO seems to require this.
2312 */
2313-#define KM_TYPE_NR 16
2314+#define KM_TYPE_NR 17
2315
2316 #endif
2317diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
2318index 9e614a1..3302cca 100644
2319--- a/arch/arm/include/asm/mach/dma.h
2320+++ b/arch/arm/include/asm/mach/dma.h
2321@@ -22,7 +22,7 @@ struct dma_ops {
2322 int (*residue)(unsigned int, dma_t *); /* optional */
2323 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
2324 const char *type;
2325-};
2326+} __do_const;
2327
2328 struct dma_struct {
2329 void *addr; /* single DMA address */
2330diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
2331index f98c7f3..e5c626d 100644
2332--- a/arch/arm/include/asm/mach/map.h
2333+++ b/arch/arm/include/asm/mach/map.h
2334@@ -23,17 +23,19 @@ struct map_desc {
2335
2336 /* types 0-3 are defined in asm/io.h */
2337 enum {
2338- MT_UNCACHED = 4,
2339- MT_CACHECLEAN,
2340- MT_MINICLEAN,
2341+ MT_UNCACHED_RW = 4,
2342+ MT_CACHECLEAN_RO,
2343+ MT_MINICLEAN_RO,
2344 MT_LOW_VECTORS,
2345 MT_HIGH_VECTORS,
2346- MT_MEMORY_RWX,
2347+ __MT_MEMORY_RWX,
2348 MT_MEMORY_RW,
2349- MT_ROM,
2350- MT_MEMORY_RWX_NONCACHED,
2351+ MT_MEMORY_RX,
2352+ MT_ROM_RX,
2353+ MT_MEMORY_RW_NONCACHED,
2354+ MT_MEMORY_RX_NONCACHED,
2355 MT_MEMORY_RW_DTCM,
2356- MT_MEMORY_RWX_ITCM,
2357+ MT_MEMORY_RX_ITCM,
2358 MT_MEMORY_RW_SO,
2359 MT_MEMORY_DMA_READY,
2360 };
2361diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
2362index 891a56b..48f337e 100644
2363--- a/arch/arm/include/asm/outercache.h
2364+++ b/arch/arm/include/asm/outercache.h
2365@@ -36,7 +36,7 @@ struct outer_cache_fns {
2366
2367 /* This is an ARM L2C thing */
2368 void (*write_sec)(unsigned long, unsigned);
2369-};
2370+} __no_const;
2371
2372 extern struct outer_cache_fns outer_cache;
2373
2374diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
2375index 4355f0e..cd9168e 100644
2376--- a/arch/arm/include/asm/page.h
2377+++ b/arch/arm/include/asm/page.h
2378@@ -23,6 +23,7 @@
2379
2380 #else
2381
2382+#include <linux/compiler.h>
2383 #include <asm/glue.h>
2384
2385 /*
2386@@ -114,7 +115,7 @@ struct cpu_user_fns {
2387 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
2388 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
2389 unsigned long vaddr, struct vm_area_struct *vma);
2390-};
2391+} __no_const;
2392
2393 #ifdef MULTI_USER
2394 extern struct cpu_user_fns cpu_user;
2395diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2396index 78a7793..e3dc06c 100644
2397--- a/arch/arm/include/asm/pgalloc.h
2398+++ b/arch/arm/include/asm/pgalloc.h
2399@@ -17,6 +17,7 @@
2400 #include <asm/processor.h>
2401 #include <asm/cacheflush.h>
2402 #include <asm/tlbflush.h>
2403+#include <asm/system_info.h>
2404
2405 #define check_pgt_cache() do { } while (0)
2406
2407@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2408 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2409 }
2410
2411+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2412+{
2413+ pud_populate(mm, pud, pmd);
2414+}
2415+
2416 #else /* !CONFIG_ARM_LPAE */
2417
2418 /*
2419@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2420 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
2421 #define pmd_free(mm, pmd) do { } while (0)
2422 #define pud_populate(mm,pmd,pte) BUG()
2423+#define pud_populate_kernel(mm,pmd,pte) BUG()
2424
2425 #endif /* CONFIG_ARM_LPAE */
2426
2427@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2428 __free_page(pte);
2429 }
2430
2431+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2432+{
2433+#ifdef CONFIG_ARM_LPAE
2434+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2435+#else
2436+ if (addr & SECTION_SIZE)
2437+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2438+ else
2439+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2440+#endif
2441+ flush_pmd_entry(pmdp);
2442+}
2443+
2444 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2445 pmdval_t prot)
2446 {
2447@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
2448 static inline void
2449 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
2450 {
2451- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
2452+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
2453 }
2454 #define pmd_pgtable(pmd) pmd_page(pmd)
2455
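The pgalloc.h additions wire PXN into user page tables: __pmd_populate() now ORs in __supported_pmd_mask, and the new __section_update() helper rewrites section mappings in place. On classic (non-LPAE) ARM a Linux pmd covers 2 MiB as a pair of 1 MiB hardware sections, so bit 20 of the address (SECTION_SIZE) selects which half to update. A runnable sketch of just that indexing, with the usual ARM constants assumed:

#include <stdio.h>

#define SECTION_SHIFT   20
#define SECTION_SIZE    (1UL << SECTION_SHIFT)

/* which of the two hardware sections in a pmd pair an address falls in */
static int section_index(unsigned long addr)
{
        return (addr & SECTION_SIZE) ? 1 : 0;
}

int main(void)
{
        printf("0x%08lx -> pmdp[%d]\n", 0xc0000000UL, section_index(0xc0000000UL));
        printf("0x%08lx -> pmdp[%d]\n", 0xc0100000UL, section_index(0xc0100000UL));
        return 0;
}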
2456diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2457index 5cfba15..f415e1a 100644
2458--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2459+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2460@@ -20,12 +20,15 @@
2461 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
2462 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
2463 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
2464+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
2465 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
2466 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
2467 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
2468+
2469 /*
2470 * - section
2471 */
2472+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
2473 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
2474 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
2475 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2476@@ -37,6 +40,7 @@
2477 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2478 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2479 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2480+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2481
2482 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2483 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2484@@ -66,6 +70,7 @@
2485 * - extended small page/tiny page
2486 */
2487 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2488+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2489 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2490 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2491 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2492diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2493index 219ac88..73ec32a 100644
2494--- a/arch/arm/include/asm/pgtable-2level.h
2495+++ b/arch/arm/include/asm/pgtable-2level.h
2496@@ -126,6 +126,9 @@
2497 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2498 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2499
2500+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2501+#define L_PTE_PXN (_AT(pteval_t, 0))
2502+
2503 /*
2504 * These are the memory types, defined to be compatible with
2505 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
2506diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
2507index 626989f..9d67a33 100644
2508--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
2509+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
2510@@ -75,6 +75,7 @@
2511 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2512 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2513 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2514+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2515 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2516
2517 /*
2518diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2519index 85c60ad..b0bbd7e 100644
2520--- a/arch/arm/include/asm/pgtable-3level.h
2521+++ b/arch/arm/include/asm/pgtable-3level.h
2522@@ -82,6 +82,7 @@
2523 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
2524 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2525 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2526+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2527 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2528 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2529 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2530@@ -95,6 +96,7 @@
2531 /*
2532 * To be used in assembly code with the upper page attributes.
2533 */
2534+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2535 #define L_PTE_XN_HIGH (1 << (54 - 32))
2536 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2537
2538diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2539index 5478e5d..f5b5cb3 100644
2540--- a/arch/arm/include/asm/pgtable.h
2541+++ b/arch/arm/include/asm/pgtable.h
2542@@ -33,6 +33,9 @@
2543 #include <asm/pgtable-2level.h>
2544 #endif
2545
2546+#define ktla_ktva(addr) (addr)
2547+#define ktva_ktla(addr) (addr)
2548+
2549 /*
2550 * Just any arbitrary offset to the start of the vmalloc VM area: the
2551 * current 8MB value just means that there will be a 8MB "hole" after the
2552@@ -48,6 +51,9 @@
2553 #define LIBRARY_TEXT_START 0x0c000000
2554
2555 #ifndef __ASSEMBLY__
2556+extern pteval_t __supported_pte_mask;
2557+extern pmdval_t __supported_pmd_mask;
2558+
2559 extern void __pte_error(const char *file, int line, pte_t);
2560 extern void __pmd_error(const char *file, int line, pmd_t);
2561 extern void __pgd_error(const char *file, int line, pgd_t);
2562@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2563 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2564 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2565
2566+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2567+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2568+
2569+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2570+#include <asm/domain.h>
2571+#include <linux/thread_info.h>
2572+#include <linux/preempt.h>
2573+
2574+static inline int test_domain(int domain, int domaintype)
2575+{
2576+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2577+}
2578+#endif
2579+
2580+#ifdef CONFIG_PAX_KERNEXEC
2581+static inline unsigned long pax_open_kernel(void) {
2582+#ifdef CONFIG_ARM_LPAE
2583+ /* TODO */
2584+#else
2585+ preempt_disable();
2586+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2587+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2588+#endif
2589+ return 0;
2590+}
2591+
2592+static inline unsigned long pax_close_kernel(void) {
2593+#ifdef CONFIG_ARM_LPAE
2594+ /* TODO */
2595+#else
2596+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2597+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2598+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2599+ preempt_enable_no_resched();
2600+#endif
2601+ return 0;
2602+}
2603+#else
2604+static inline unsigned long pax_open_kernel(void) { return 0; }
2605+static inline unsigned long pax_close_kernel(void) { return 0; }
2606+#endif
2607+
2608 /*
2609 * This is the lowest virtual address we can permit any user space
2610 * mapping to be mapped at. This is particularly important for
2611@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2612 /*
2613 * The pgprot_* and protection_map entries will be fixed up in runtime
2614 * to include the cachable and bufferable bits based on memory policy,
2615- * as well as any architecture dependent bits like global/ASID and SMP
2616- * shared mapping bits.
2617+ * as well as any architecture dependent bits like global/ASID, PXN,
2618+ * and SMP shared mapping bits.
2619 */
2620 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2621
2622@@ -265,7 +313,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2623 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2624 {
2625 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2626- L_PTE_NONE | L_PTE_VALID;
2627+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2628 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2629 return pte;
2630 }
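pgtable.h is where KERNEXEC's write window is implemented: pax_open_kernel() disables preemption and flips DOMAIN_KERNEL to manager so permission checks are bypassed, and pax_close_kernel() restores client mode; the fncpy.h hunk above and the fiq.c and patch.c hunks below all bracket their writes to read-only kernel memory with this pair (LPAE is still a TODO in this version of the patch). A runnable userspace analogue of the calling pattern, with mprotect() standing in for the DACR flip:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        strcpy(p, "original");
        mprotect(p, pagesz, PROT_READ);         /* "read-only kernel data" */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);    /* pax_open_kernel()  */
        strcpy(p, "patched");                           /* the protected write */
        mprotect(p, pagesz, PROT_READ);                 /* pax_close_kernel() */

        printf("%s\n", p);
        munmap(p, pagesz);
        return 0;
}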
2631diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2632index c25ef3e..735f14b 100644
2633--- a/arch/arm/include/asm/psci.h
2634+++ b/arch/arm/include/asm/psci.h
2635@@ -32,7 +32,7 @@ struct psci_operations {
2636 int (*affinity_info)(unsigned long target_affinity,
2637 unsigned long lowest_affinity_level);
2638 int (*migrate_info_type)(void);
2639-};
2640+} __no_const;
2641
2642 extern struct psci_operations psci_ops;
2643 extern struct smp_operations psci_smp_ops;
2644diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2645index 2ec765c..beb1fe16 100644
2646--- a/arch/arm/include/asm/smp.h
2647+++ b/arch/arm/include/asm/smp.h
2648@@ -113,7 +113,7 @@ struct smp_operations {
2649 int (*cpu_disable)(unsigned int cpu);
2650 #endif
2651 #endif
2652-};
2653+} __no_const;
2654
2655 struct of_cpu_method {
2656 const char *method;
2657diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2658index e4e4208..086684a 100644
2659--- a/arch/arm/include/asm/thread_info.h
2660+++ b/arch/arm/include/asm/thread_info.h
2661@@ -88,9 +88,9 @@ struct thread_info {
2662 .flags = 0, \
2663 .preempt_count = INIT_PREEMPT_COUNT, \
2664 .addr_limit = KERNEL_DS, \
2665- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2666- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2667- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2668+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2669+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2670+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2671 .restart_block = { \
2672 .fn = do_no_restart_syscall, \
2673 }, \
2674@@ -164,7 +164,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2675 #define TIF_SYSCALL_AUDIT 9
2676 #define TIF_SYSCALL_TRACEPOINT 10
2677 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2678-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2679+/* within 8 bits of TIF_SYSCALL_TRACE
2680+ * to meet flexible second operand requirements
2681+ */
2682+#define TIF_GRSEC_SETXID 12
2683+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2684 #define TIF_USING_IWMMXT 17
2685 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2686 #define TIF_RESTORE_SIGMASK 20
2687@@ -178,10 +182,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2688 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2689 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2690 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2691+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2692
2693 /* Checks for any syscall work in entry-common.S */
2694 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2695- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2696+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2697
2698 /*
2699 * Change these and you break ASM code in entry-common.S
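The comment about staying "within 8 bits of TIF_SYSCALL_TRACE" is an ARM encoding constraint: a data-processing immediate is an 8-bit value rotated right by an even amount, so _TIF_SYSCALL_WORK can only be tested with a single tst if all of its bits fit one rotated 8-bit window. With TIF_GRSEC_SETXID at 12, the mask is 0x1f00 (bits 8-12), which encodes; a flag pushed out past that window would not. A runnable checker for the encoding rule (the 0x20f00 counterexample mask is hypothetical):

#include <stdio.h>
#include <stdint.h>

/* ARMv7 data-processing immediate: an 8-bit value rotated right by 2*n */
static int arm_encodable(uint32_t imm)
{
        for (int rot = 0; rot < 32; rot += 2) {
                /* rotate left by rot to undo a possible ror encoding */
                uint32_t v = rot ? (imm << rot) | (imm >> (32 - rot)) : imm;
                if (v <= 0xffu)
                        return 1;
        }
        return 0;
}

int main(void)
{
        /* bits 8..12: TRACE, AUDIT, TRACEPOINT, SECCOMP, GRSEC_SETXID */
        uint32_t ok_mask  = 0x1f00;
        /* hypothetical: the same flags with one bit pushed out to bit 17 */
        uint32_t bad_mask = 0x20f00;

        printf("0x%05x encodable: %d\n", ok_mask, arm_encodable(ok_mask));
        printf("0x%05x encodable: %d\n", bad_mask, arm_encodable(bad_mask));
        return 0;
}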
2700diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2701index 75d9579..b5b40e4 100644
2702--- a/arch/arm/include/asm/uaccess.h
2703+++ b/arch/arm/include/asm/uaccess.h
2704@@ -18,6 +18,7 @@
2705 #include <asm/domain.h>
2706 #include <asm/unified.h>
2707 #include <asm/compiler.h>
2708+#include <asm/pgtable.h>
2709
2710 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2711 #include <asm-generic/uaccess-unaligned.h>
2712@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2713 static inline void set_fs(mm_segment_t fs)
2714 {
2715 current_thread_info()->addr_limit = fs;
2716- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2717+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2718 }
2719
2720 #define segment_eq(a,b) ((a) == (b))
2721
2722+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2723+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2724+
2725+static inline void pax_open_userland(void)
2726+{
2727+
2728+#ifdef CONFIG_PAX_MEMORY_UDEREF
2729+ if (segment_eq(get_fs(), USER_DS)) {
2730+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2731+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2732+ }
2733+#endif
2734+
2735+}
2736+
2737+static inline void pax_close_userland(void)
2738+{
2739+
2740+#ifdef CONFIG_PAX_MEMORY_UDEREF
2741+ if (segment_eq(get_fs(), USER_DS)) {
2742+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2743+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2744+ }
2745+#endif
2746+
2747+}
2748+
2749 #define __addr_ok(addr) ({ \
2750 unsigned long flag; \
2751 __asm__("cmp %2, %0; movlo %0, #0" \
2752@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2753
2754 #define get_user(x,p) \
2755 ({ \
2756+ int __e; \
2757 might_fault(); \
2758- __get_user_check(x,p); \
2759+ pax_open_userland(); \
2760+ __e = __get_user_check(x,p); \
2761+ pax_close_userland(); \
2762+ __e; \
2763 })
2764
2765 extern int __put_user_1(void *, unsigned int);
2766@@ -196,8 +228,12 @@ extern int __put_user_8(void *, unsigned long long);
2767
2768 #define put_user(x,p) \
2769 ({ \
2770+ int __e; \
2771 might_fault(); \
2772- __put_user_check(x,p); \
2773+ pax_open_userland(); \
2774+ __e = __put_user_check(x,p); \
2775+ pax_close_userland(); \
2776+ __e; \
2777 })
2778
2779 #else /* CONFIG_MMU */
2780@@ -221,6 +257,7 @@ static inline void set_fs(mm_segment_t fs)
2781
2782 #endif /* CONFIG_MMU */
2783
2784+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2785 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2786
2787 #define user_addr_max() \
2788@@ -238,13 +275,17 @@ static inline void set_fs(mm_segment_t fs)
2789 #define __get_user(x,ptr) \
2790 ({ \
2791 long __gu_err = 0; \
2792+ pax_open_userland(); \
2793 __get_user_err((x),(ptr),__gu_err); \
2794+ pax_close_userland(); \
2795 __gu_err; \
2796 })
2797
2798 #define __get_user_error(x,ptr,err) \
2799 ({ \
2800+ pax_open_userland(); \
2801 __get_user_err((x),(ptr),err); \
2802+ pax_close_userland(); \
2803 (void) 0; \
2804 })
2805
2806@@ -320,13 +361,17 @@ do { \
2807 #define __put_user(x,ptr) \
2808 ({ \
2809 long __pu_err = 0; \
2810+ pax_open_userland(); \
2811 __put_user_err((x),(ptr),__pu_err); \
2812+ pax_close_userland(); \
2813 __pu_err; \
2814 })
2815
2816 #define __put_user_error(x,ptr,err) \
2817 ({ \
2818+ pax_open_userland(); \
2819 __put_user_err((x),(ptr),err); \
2820+ pax_close_userland(); \
2821 (void) 0; \
2822 })
2823
2824@@ -426,11 +471,44 @@ do { \
2825
2826
2827 #ifdef CONFIG_MMU
2828-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2829-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2830+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2831+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2832+
2833+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2834+{
2835+ unsigned long ret;
2836+
2837+ check_object_size(to, n, false);
2838+ pax_open_userland();
2839+ ret = ___copy_from_user(to, from, n);
2840+ pax_close_userland();
2841+ return ret;
2842+}
2843+
2844+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2845+{
2846+ unsigned long ret;
2847+
2848+ check_object_size(from, n, true);
2849+ pax_open_userland();
2850+ ret = ___copy_to_user(to, from, n);
2851+ pax_close_userland();
2852+ return ret;
2853+}
2854+
2855 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2856-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2857+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2858 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2859+
2860+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2861+{
2862+ unsigned long ret;
2863+ pax_open_userland();
2864+ ret = ___clear_user(addr, n);
2865+ pax_close_userland();
2866+ return ret;
2867+}
2868+
2869 #else
2870 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2871 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2872@@ -439,6 +517,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2873
2874 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2875 {
2876+ if ((long)n < 0)
2877+ return n;
2878+
2879 if (access_ok(VERIFY_READ, from, n))
2880 n = __copy_from_user(to, from, n);
2881 else /* security hole - plug it */
2882@@ -448,6 +529,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2883
2884 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2885 {
2886+ if ((long)n < 0)
2887+ return n;
2888+
2889 if (access_ok(VERIFY_WRITE, to, n))
2890 n = __copy_to_user(to, from, n);
2891 return n;
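Three layers are added to the uaccess path above: get_user/put_user and the __copy_* helpers are bracketed with the userland open/close pair, check_object_size() hooks in PAX_USERCOPY's object-boundary checking, and copy_from_user()/copy_to_user() gain a (long)n < 0 guard that treats an enormous size_t as an overflowed or attacker-influenced length and bails out before access_ok() is even consulted. A runnable sketch of just the sign-check idea (sane_copy is a hypothetical stand-in for the real helpers):

#include <stdio.h>
#include <string.h>

static unsigned long sane_copy(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)        /* e.g. (size_t)-1 from "len - 1" on len == 0 */
                return n;       /* report everything as uncopied */
        memcpy(to, from, n);
        return 0;               /* 0 bytes left: full copy */
}

int main(void)
{
        char src[8] = "data", dst[8];

        printf("ok:  %lu left\n", sane_copy(dst, src, sizeof(src)));
        printf("bad: %lu left\n", sane_copy(dst, src, (unsigned long)-1));
        return 0;
}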
2892diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2893index 5af0ed1..cea83883 100644
2894--- a/arch/arm/include/uapi/asm/ptrace.h
2895+++ b/arch/arm/include/uapi/asm/ptrace.h
2896@@ -92,7 +92,7 @@
2897 * ARMv7 groups of PSR bits
2898 */
2899 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2900-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2901+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2902 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2903 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2904
2905diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2906index f7b450f..f5364c5 100644
2907--- a/arch/arm/kernel/armksyms.c
2908+++ b/arch/arm/kernel/armksyms.c
2909@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2910
2911 /* networking */
2912 EXPORT_SYMBOL(csum_partial);
2913-EXPORT_SYMBOL(csum_partial_copy_from_user);
2914+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2915 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2916 EXPORT_SYMBOL(__csum_ipv6_magic);
2917
2918@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2919 #ifdef CONFIG_MMU
2920 EXPORT_SYMBOL(copy_page);
2921
2922-EXPORT_SYMBOL(__copy_from_user);
2923-EXPORT_SYMBOL(__copy_to_user);
2924-EXPORT_SYMBOL(__clear_user);
2925+EXPORT_SYMBOL(___copy_from_user);
2926+EXPORT_SYMBOL(___copy_to_user);
2927+EXPORT_SYMBOL(___clear_user);
2928
2929 EXPORT_SYMBOL(__get_user_1);
2930 EXPORT_SYMBOL(__get_user_2);
2931diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2932index 52a949a..d8bbcab 100644
2933--- a/arch/arm/kernel/entry-armv.S
2934+++ b/arch/arm/kernel/entry-armv.S
2935@@ -47,6 +47,87 @@
2936 9997:
2937 .endm
2938
2939+ .macro pax_enter_kernel
2940+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2941+ @ make aligned space for saved DACR
2942+ sub sp, sp, #8
2943+ @ save regs
2944+ stmdb sp!, {r1, r2}
2945+ @ read DACR from cpu_domain into r1
2946+ mov r2, sp
2947+ @ assume 8K pages, since we have to split the immediate in two
2948+ bic r2, r2, #(0x1fc0)
2949+ bic r2, r2, #(0x3f)
2950+ ldr r1, [r2, #TI_CPU_DOMAIN]
2951+ @ store old DACR on stack
2952+ str r1, [sp, #8]
2953+#ifdef CONFIG_PAX_KERNEXEC
2954+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2955+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2956+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2957+#endif
2958+#ifdef CONFIG_PAX_MEMORY_UDEREF
2959+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2960+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2961+#endif
2962+ @ write r1 to current_thread_info()->cpu_domain
2963+ str r1, [r2, #TI_CPU_DOMAIN]
2964+ @ write r1 to DACR
2965+ mcr p15, 0, r1, c3, c0, 0
2966+ @ instruction sync
2967+ instr_sync
2968+ @ restore regs
2969+ ldmia sp!, {r1, r2}
2970+#endif
2971+ .endm
2972+
2973+ .macro pax_open_userland
2974+#ifdef CONFIG_PAX_MEMORY_UDEREF
2975+ @ save regs
2976+ stmdb sp!, {r0, r1}
2977+ @ read DACR from cpu_domain into r1
2978+ mov r0, sp
2979+ @ assume 8K pages, since we have to split the immediate in two
2980+ bic r0, r0, #(0x1fc0)
2981+ bic r0, r0, #(0x3f)
2982+ ldr r1, [r0, #TI_CPU_DOMAIN]
2983+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2984+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2985+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2986+ @ write r1 to current_thread_info()->cpu_domain
2987+ str r1, [r0, #TI_CPU_DOMAIN]
2988+ @ write r1 to DACR
2989+ mcr p15, 0, r1, c3, c0, 0
2990+ @ instruction sync
2991+ instr_sync
2992+ @ restore regs
2993+ ldmia sp!, {r0, r1}
2994+#endif
2995+ .endm
2996+
2997+ .macro pax_close_userland
2998+#ifdef CONFIG_PAX_MEMORY_UDEREF
2999+ @ save regs
3000+ stmdb sp!, {r0, r1}
3001+ @ read DACR from cpu_domain into r1
3002+ mov r0, sp
3003+ @ assume 8K pages, since we have to split the immediate in two
3004+ bic r0, r0, #(0x1fc0)
3005+ bic r0, r0, #(0x3f)
3006+ ldr r1, [r0, #TI_CPU_DOMAIN]
3007+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
3008+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3009+ @ write r1 to current_thread_info()->cpu_domain
3010+ str r1, [r0, #TI_CPU_DOMAIN]
3011+ @ write r1 to DACR
3012+ mcr p15, 0, r1, c3, c0, 0
3013+ @ instruction sync
3014+ instr_sync
3015+ @ restore regs
3016+ ldmia sp!, {r0, r1}
3017+#endif
3018+ .endm
3019+
3020 .macro pabt_helper
3021 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
3022 #ifdef MULTI_PABORT
3023@@ -89,11 +170,15 @@
3024 * Invalid mode handlers
3025 */
3026 .macro inv_entry, reason
3027+
3028+ pax_enter_kernel
3029+
3030 sub sp, sp, #S_FRAME_SIZE
3031 ARM( stmib sp, {r1 - lr} )
3032 THUMB( stmia sp, {r0 - r12} )
3033 THUMB( str sp, [sp, #S_SP] )
3034 THUMB( str lr, [sp, #S_LR] )
3035+
3036 mov r1, #\reason
3037 .endm
3038
3039@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
3040 .macro svc_entry, stack_hole=0
3041 UNWIND(.fnstart )
3042 UNWIND(.save {r0 - pc} )
3043+
3044+ pax_enter_kernel
3045+
3046 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
3047+
3048 #ifdef CONFIG_THUMB2_KERNEL
3049 SPFIX( str r0, [sp] ) @ temporarily saved
3050 SPFIX( mov r0, sp )
3051@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
3052 ldmia r0, {r3 - r5}
3053 add r7, sp, #S_SP - 4 @ here for interlock avoidance
3054 mov r6, #-1 @ "" "" "" ""
3055+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3056+ @ offset sp by 8 as done in pax_enter_kernel
3057+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
3058+#else
3059 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
3060+#endif
3061 SPFIX( addeq r2, r2, #4 )
3062 str r3, [sp, #-4]! @ save the "real" r0 copied
3063 @ from the exception stack
3064@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
3065 .macro usr_entry
3066 UNWIND(.fnstart )
3067 UNWIND(.cantunwind ) @ don't unwind the user space
3068+
3069+ pax_enter_kernel_user
3070+
3071 sub sp, sp, #S_FRAME_SIZE
3072 ARM( stmib sp, {r1 - r12} )
3073 THUMB( stmia sp, {r0 - r12} )
3074@@ -421,7 +518,9 @@ __und_usr:
3075 tst r3, #PSR_T_BIT @ Thumb mode?
3076 bne __und_usr_thumb
3077 sub r4, r2, #4 @ ARM instr at LR - 4
3078+ pax_open_userland
3079 1: ldrt r0, [r4]
3080+ pax_close_userland
3081 ARM_BE8(rev r0, r0) @ little endian instruction
3082
3083 @ r0 = 32-bit ARM instruction which caused the exception
3084@@ -455,11 +554,15 @@ __und_usr_thumb:
3085 */
3086 .arch armv6t2
3087 #endif
3088+ pax_open_userland
3089 2: ldrht r5, [r4]
3090+ pax_close_userland
3091 ARM_BE8(rev16 r5, r5) @ little endian instruction
3092 cmp r5, #0xe800 @ 32bit instruction if xx != 0
3093 blo __und_usr_fault_16 @ 16bit undefined instruction
3094+ pax_open_userland
3095 3: ldrht r0, [r2]
3096+ pax_close_userland
3097 ARM_BE8(rev16 r0, r0) @ little endian instruction
3098 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
3099 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
3100@@ -489,7 +592,8 @@ ENDPROC(__und_usr)
3101 */
3102 .pushsection .fixup, "ax"
3103 .align 2
3104-4: str r4, [sp, #S_PC] @ retry current instruction
3105+4: pax_close_userland
3106+ str r4, [sp, #S_PC] @ retry current instruction
3107 mov pc, r9
3108 .popsection
3109 .pushsection __ex_table,"a"
3110@@ -698,7 +802,7 @@ ENTRY(__switch_to)
3111 THUMB( str lr, [ip], #4 )
3112 ldr r4, [r2, #TI_TP_VALUE]
3113 ldr r5, [r2, #TI_TP_VALUE + 4]
3114-#ifdef CONFIG_CPU_USE_DOMAINS
3115+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3116 ldr r6, [r2, #TI_CPU_DOMAIN]
3117 #endif
3118 switch_tls r1, r4, r5, r3, r7
3119@@ -707,7 +811,7 @@ ENTRY(__switch_to)
3120 ldr r8, =__stack_chk_guard
3121 ldr r7, [r7, #TSK_STACK_CANARY]
3122 #endif
3123-#ifdef CONFIG_CPU_USE_DOMAINS
3124+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3125 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
3126 #endif
3127 mov r5, r0
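A recurring idiom in the entry-armv.S macros above is the pair "bic rN, rN, #0x1fc0" / "bic rN, rN, #0x3f": struct thread_info sits at the bottom of the 8 KiB kernel stack, so clearing the low 13 bits of sp yields its address, and the mask is split in two because 0x1fff is not a valid ARM rotated immediate while 0x1fc0 and 0x3f both are (hence the "split the immediate in two" comments). A runnable demonstration of the masking; the sp value is made up:

#include <stdio.h>

#define THREAD_SIZE     8192UL

int main(void)
{
        unsigned long sp = 0xc7a51e34UL;        /* hypothetical kernel sp */
        unsigned long ti = sp & ~0x1fc0UL & ~0x3fUL;    /* the two bics */

        printf("sp          = 0x%08lx\n", sp);
        printf("thread_info = 0x%08lx\n", ti);
        printf("same as sp & ~(THREAD_SIZE-1): %d\n",
               ti == (sp & ~(THREAD_SIZE - 1)));
        return 0;
}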
3128diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
3129index 7139d4a..feaf37f 100644
3130--- a/arch/arm/kernel/entry-common.S
3131+++ b/arch/arm/kernel/entry-common.S
3132@@ -10,18 +10,46 @@
3133
3134 #include <asm/unistd.h>
3135 #include <asm/ftrace.h>
3136+#include <asm/domain.h>
3137 #include <asm/unwind.h>
3138
3139+#include "entry-header.S"
3140+
3141 #ifdef CONFIG_NEED_RET_TO_USER
3142 #include <mach/entry-macro.S>
3143 #else
3144 .macro arch_ret_to_user, tmp1, tmp2
3145+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3146+ @ save regs
3147+ stmdb sp!, {r1, r2}
3148+ @ read DACR from cpu_domain into r1
3149+ mov r2, sp
3150+ @ assume 8K pages, since we have to split the immediate in two
3151+ bic r2, r2, #(0x1fc0)
3152+ bic r2, r2, #(0x3f)
3153+ ldr r1, [r2, #TI_CPU_DOMAIN]
3154+#ifdef CONFIG_PAX_KERNEXEC
3155+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
3156+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
3157+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
3158+#endif
3159+#ifdef CONFIG_PAX_MEMORY_UDEREF
3160+ @ set current DOMAIN_USER to DOMAIN_UDEREF
3161+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3162+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
3163+#endif
3164+ @ write r1 to current_thread_info()->cpu_domain
3165+ str r1, [r2, #TI_CPU_DOMAIN]
3166+ @ write r1 to DACR
3167+ mcr p15, 0, r1, c3, c0, 0
3168+ @ instruction sync
3169+ instr_sync
3170+ @ restore regs
3171+ ldmia sp!, {r1, r2}
3172+#endif
3173 .endm
3174 #endif
3175
3176-#include "entry-header.S"
3177-
3178-
3179 .align 5
3180 /*
3181 * This is the fast syscall return path. We do as little as
3182@@ -405,6 +433,12 @@ ENTRY(vector_swi)
3183 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
3184 #endif
3185
3186+ /*
3187+ * do this here to avoid a performance hit of wrapping the code above
3188+ * that directly dereferences userland to parse the SWI instruction
3189+ */
3190+ pax_enter_kernel_user
3191+
3192 adr tbl, sys_call_table @ load syscall table pointer
3193
3194 #if defined(CONFIG_OABI_COMPAT)
3195diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
3196index 5d702f8..f5fc51a 100644
3197--- a/arch/arm/kernel/entry-header.S
3198+++ b/arch/arm/kernel/entry-header.S
3199@@ -188,6 +188,60 @@
3200 msr cpsr_c, \rtemp @ switch back to the SVC mode
3201 .endm
3202
3203+ .macro pax_enter_kernel_user
3204+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3205+ @ save regs
3206+ stmdb sp!, {r0, r1}
3207+ @ read DACR from cpu_domain into r1
3208+ mov r0, sp
3209+ @ assume 8K pages, since we have to split the immediate in two
3210+ bic r0, r0, #(0x1fc0)
3211+ bic r0, r0, #(0x3f)
3212+ ldr r1, [r0, #TI_CPU_DOMAIN]
3213+#ifdef CONFIG_PAX_MEMORY_UDEREF
3214+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
3215+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3216+#endif
3217+#ifdef CONFIG_PAX_KERNEXEC
3218+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
3219+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
3220+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
3221+#endif
3222+ @ write r1 to current_thread_info()->cpu_domain
3223+ str r1, [r0, #TI_CPU_DOMAIN]
3224+ @ write r1 to DACR
3225+ mcr p15, 0, r1, c3, c0, 0
3226+ @ instruction sync
3227+ instr_sync
3228+ @ restore regs
3229+ ldmia sp!, {r0, r1}
3230+#endif
3231+ .endm
3232+
3233+ .macro pax_exit_kernel
3234+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3235+ @ save regs
3236+ stmdb sp!, {r0, r1}
3237+ @ read old DACR from stack into r1
3238+ ldr r1, [sp, #(8 + S_SP)]
3239+ sub r1, r1, #8
3240+ ldr r1, [r1]
3241+
3242+ @ write r1 to current_thread_info()->cpu_domain
3243+ mov r0, sp
3244+ @ assume 8K pages, since we have to split the immediate in two
3245+ bic r0, r0, #(0x1fc0)
3246+ bic r0, r0, #(0x3f)
3247+ str r1, [r0, #TI_CPU_DOMAIN]
3248+ @ write r1 to DACR
3249+ mcr p15, 0, r1, c3, c0, 0
3250+ @ instruction sync
3251+ instr_sync
3252+ @ restore regs
3253+ ldmia sp!, {r0, r1}
3254+#endif
3255+ .endm
3256+
3257 #ifndef CONFIG_THUMB2_KERNEL
3258 .macro svc_exit, rpsr, irq = 0
3259 .if \irq != 0
3260@@ -207,6 +261,9 @@
3261 blne trace_hardirqs_off
3262 #endif
3263 .endif
3264+
3265+ pax_exit_kernel
3266+
3267 msr spsr_cxsf, \rpsr
3268 #if defined(CONFIG_CPU_V6)
3269 ldr r0, [sp]
3270@@ -265,6 +322,9 @@
3271 blne trace_hardirqs_off
3272 #endif
3273 .endif
3274+
3275+ pax_exit_kernel
3276+
3277 ldr lr, [sp, #S_SP] @ top of the stack
3278 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
3279 clrex @ clear the exclusive monitor
3280diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
3281index 918875d..cd5fa27 100644
3282--- a/arch/arm/kernel/fiq.c
3283+++ b/arch/arm/kernel/fiq.c
3284@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
3285 void *base = vectors_page;
3286 unsigned offset = FIQ_OFFSET;
3287
3288+ pax_open_kernel();
3289 memcpy(base + offset, start, length);
3290+ pax_close_kernel();
3291+
3292 if (!cache_is_vipt_nonaliasing())
3293 flush_icache_range((unsigned long)base + offset, offset +
3294 length);
3295diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
3296index 2c35f0f..7747ee6 100644
3297--- a/arch/arm/kernel/head.S
3298+++ b/arch/arm/kernel/head.S
3299@@ -437,7 +437,7 @@ __enable_mmu:
3300 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
3301 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
3302 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
3303- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
3304+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
3305 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
3306 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
3307 #endif
3308diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
3309index 45e4781..8eac93d 100644
3310--- a/arch/arm/kernel/module.c
3311+++ b/arch/arm/kernel/module.c
3312@@ -38,12 +38,39 @@
3313 #endif
3314
3315 #ifdef CONFIG_MMU
3316-void *module_alloc(unsigned long size)
3317+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
3318 {
3319+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
3320+ return NULL;
3321 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
3322- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
3323+ GFP_KERNEL, prot, NUMA_NO_NODE,
3324 __builtin_return_address(0));
3325 }
3326+
3327+void *module_alloc(unsigned long size)
3328+{
3329+
3330+#ifdef CONFIG_PAX_KERNEXEC
3331+ return __module_alloc(size, PAGE_KERNEL);
3332+#else
3333+ return __module_alloc(size, PAGE_KERNEL_EXEC);
3334+#endif
3335+
3336+}
3337+
3338+#ifdef CONFIG_PAX_KERNEXEC
3339+void module_free_exec(struct module *mod, void *module_region)
3340+{
3341+ module_free(mod, module_region);
3342+}
3343+EXPORT_SYMBOL(module_free_exec);
3344+
3345+void *module_alloc_exec(unsigned long size)
3346+{
3347+ return __module_alloc(size, PAGE_KERNEL_EXEC);
3348+}
3349+EXPORT_SYMBOL(module_alloc_exec);
3350+#endif
3351 #endif
3352
3353 int
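module.c now separates a module's mappings by permission: plain module_alloc() returns non-executable PAGE_KERNEL memory under KERNEXEC, while the new module_alloc_exec()/module_free_exec() pair supplies the executable region, so module memory is never writable and executable at the same time. A userspace analogue of the resulting W^X discipline, staging bytes in a RW mapping and then flipping it to RX (sketch only; no real code is executed here):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *code = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (code == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* stage "module text" while the mapping is RW but not executable */
        memset(code, 0, pagesz);

        /* W^X flip: from here on the page is executable but immutable */
        if (mprotect(code, pagesz, PROT_READ | PROT_EXEC) != 0) {
                perror("mprotect");
                return 1;
        }
        puts("text mapping is now RX");
        munmap(code, pagesz);
        return 0;
}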
3354diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
3355index 07314af..c46655c 100644
3356--- a/arch/arm/kernel/patch.c
3357+++ b/arch/arm/kernel/patch.c
3358@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3359 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
3360 int size;
3361
3362+ pax_open_kernel();
3363 if (thumb2 && __opcode_is_thumb16(insn)) {
3364 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
3365 size = sizeof(u16);
3366@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3367 *(u32 *)addr = insn;
3368 size = sizeof(u32);
3369 }
3370+ pax_close_kernel();
3371
3372 flush_icache_range((uintptr_t)(addr),
3373 (uintptr_t)(addr) + size);
3374diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
3375index 81ef686..f4130b8 100644
3376--- a/arch/arm/kernel/process.c
3377+++ b/arch/arm/kernel/process.c
3378@@ -212,6 +212,7 @@ void machine_power_off(void)
3379
3380 if (pm_power_off)
3381 pm_power_off();
3382+ BUG();
3383 }
3384
3385 /*
3386@@ -225,7 +226,7 @@ void machine_power_off(void)
3387 * executing pre-reset code, and using RAM that the primary CPU's code wishes
3388 * to use. Implementing such co-ordination would be essentially impossible.
3389 */
3390-void machine_restart(char *cmd)
3391+__noreturn void machine_restart(char *cmd)
3392 {
3393 local_irq_disable();
3394 smp_send_stop();
3395@@ -248,8 +249,8 @@ void __show_regs(struct pt_regs *regs)
3396
3397 show_regs_print_info(KERN_DEFAULT);
3398
3399- print_symbol("PC is at %s\n", instruction_pointer(regs));
3400- print_symbol("LR is at %s\n", regs->ARM_lr);
3401+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
3402+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
3403 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
3404 "sp : %08lx ip : %08lx fp : %08lx\n",
3405 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
3406@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
3407 return 0;
3408 }
3409
3410-unsigned long arch_randomize_brk(struct mm_struct *mm)
3411-{
3412- unsigned long range_end = mm->brk + 0x02000000;
3413- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
3414-}
3415-
3416 #ifdef CONFIG_MMU
3417 #ifdef CONFIG_KUSER_HELPERS
3418 /*
3419@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
3420
3421 static int __init gate_vma_init(void)
3422 {
3423- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
3424+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
3425 return 0;
3426 }
3427 arch_initcall(gate_vma_init);
3428@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
3429
3430 const char *arch_vma_name(struct vm_area_struct *vma)
3431 {
3432- return is_gate_vma(vma) ? "[vectors]" :
3433- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3434- "[sigpage]" : NULL;
3435+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3436 }
3437
3438-static struct page *signal_page;
3439-extern struct page *get_signal_page(void);
3440-
3441 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3442 {
3443 struct mm_struct *mm = current->mm;
3444- unsigned long addr;
3445- int ret;
3446-
3447- if (!signal_page)
3448- signal_page = get_signal_page();
3449- if (!signal_page)
3450- return -ENOMEM;
3451
3452 down_write(&mm->mmap_sem);
3453- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3454- if (IS_ERR_VALUE(addr)) {
3455- ret = addr;
3456- goto up_fail;
3457- }
3458-
3459- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3460- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3461- &signal_page);
3462-
3463- if (ret == 0)
3464- mm->context.sigpage = addr;
3465-
3466- up_fail:
3467+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3468 up_write(&mm->mmap_sem);
3469- return ret;
3470+ return 0;
3471 }
3472 #endif
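
The rewritten arch_setup_additional_pages() above no longer maps a real sigpage; it only records a randomized, word-aligned address that the prefetch-abort handler later emulates. A sketch of that address computation, assuming the common ARM PAGE_OFFSET of 0xC0000000 (a 3G/1G split) and using rand() in place of get_random_int().

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_OFFSET	0xC0000000UL	/* common ARM 3G/1G split; assumption */

int main(void)
{
	unsigned long r, sigpage;

	srand((unsigned int)time(NULL));
	r = (unsigned long)rand();
	/* Random offset inside a fixed window, forced to 4-byte alignment
	 * by clearing the low two bits: the same expression the hunk
	 * stores in mm->context.sigpage. */
	sigpage = (PAGE_OFFSET + (r % 0x3FFEFFE0UL)) & 0xFFFFFFFCUL;
	printf("sigpage = 0x%08lx\n", sigpage);
	return 0;
}
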
3473diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3474index f73891b..cf3004e 100644
3475--- a/arch/arm/kernel/psci.c
3476+++ b/arch/arm/kernel/psci.c
3477@@ -28,7 +28,7 @@
3478 #include <asm/psci.h>
3479 #include <asm/system_misc.h>
3480
3481-struct psci_operations psci_ops;
3482+struct psci_operations psci_ops __read_only;
3483
3484 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3485 typedef int (*psci_initcall_t)(const struct device_node *);
3486diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3487index 0c27ed6..b67388e 100644
3488--- a/arch/arm/kernel/ptrace.c
3489+++ b/arch/arm/kernel/ptrace.c
3490@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3491 regs->ARM_ip = ip;
3492 }
3493
3494+#ifdef CONFIG_GRKERNSEC_SETXID
3495+extern void gr_delayed_cred_worker(void);
3496+#endif
3497+
3498 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3499 {
3500 current_thread_info()->syscall = scno;
3501
3502+#ifdef CONFIG_GRKERNSEC_SETXID
3503+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3504+ gr_delayed_cred_worker();
3505+#endif
3506+
3507 /* Do the secure computing check first; failures should be fast. */
3508 if (secure_computing(scno) == -1)
3509 return -1;
3510diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3511index 8a16ee5..4f560e5 100644
3512--- a/arch/arm/kernel/setup.c
3513+++ b/arch/arm/kernel/setup.c
3514@@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap);
3515 unsigned int elf_hwcap2 __read_mostly;
3516 EXPORT_SYMBOL(elf_hwcap2);
3517
3518+pteval_t __supported_pte_mask __read_only;
3519+pmdval_t __supported_pmd_mask __read_only;
3520
3521 #ifdef MULTI_CPU
3522-struct processor processor __read_mostly;
3523+struct processor processor __read_only;
3524 #endif
3525 #ifdef MULTI_TLB
3526-struct cpu_tlb_fns cpu_tlb __read_mostly;
3527+struct cpu_tlb_fns cpu_tlb __read_only;
3528 #endif
3529 #ifdef MULTI_USER
3530-struct cpu_user_fns cpu_user __read_mostly;
3531+struct cpu_user_fns cpu_user __read_only;
3532 #endif
3533 #ifdef MULTI_CACHE
3534-struct cpu_cache_fns cpu_cache __read_mostly;
3535+struct cpu_cache_fns cpu_cache __read_only;
3536 #endif
3537 #ifdef CONFIG_OUTER_CACHE
3538-struct outer_cache_fns outer_cache __read_mostly;
3539+struct outer_cache_fns outer_cache __read_only;
3540 EXPORT_SYMBOL(outer_cache);
3541 #endif
3542
3543@@ -251,9 +253,13 @@ static int __get_cpu_architecture(void)
3544 asm("mrc p15, 0, %0, c0, c1, 4"
3545 : "=r" (mmfr0));
3546 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3547- (mmfr0 & 0x000000f0) >= 0x00000030)
3548+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3549 cpu_arch = CPU_ARCH_ARMv7;
3550- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3551+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3552+ __supported_pte_mask |= L_PTE_PXN;
3553+ __supported_pmd_mask |= PMD_PXNTABLE;
3554+ }
3555+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3556 (mmfr0 & 0x000000f0) == 0x00000020)
3557 cpu_arch = CPU_ARCH_ARMv6;
3558 else
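
The setup.c hunk probes ID_MMFR0 to decide whether the CPU implements the PXN (privileged execute-never) bit before enabling it in __supported_pte_mask. A sketch of the same field test, assuming the VMSA support field occupies the low nibble as the hunk's masks imply; the sample register value is made up.

#include <stdio.h>

/* The VMSA support field sits in the low nibble of ID_MMFR0; per the
 * hunk, values 4 and 5 indicate the PXN bit is implemented. */
static int vmsa_has_pxn(unsigned int mmfr0)
{
	unsigned int vmsa = mmfr0 & 0x0000000f;

	return vmsa == 0x00000004 || vmsa == 0x00000005;
}

int main(void)
{
	unsigned int sample = 0x00100145;	/* made-up register value */

	printf("PXN supported: %s\n", vmsa_has_pxn(sample) ? "yes" : "no");
	return 0;
}
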
3559diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3560index bd19834..e4d8c66 100644
3561--- a/arch/arm/kernel/signal.c
3562+++ b/arch/arm/kernel/signal.c
3563@@ -24,8 +24,6 @@
3564
3565 extern const unsigned long sigreturn_codes[7];
3566
3567-static unsigned long signal_return_offset;
3568-
3569 #ifdef CONFIG_CRUNCH
3570 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3571 {
3572@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3573 * except when the MPU has protected the vectors
3574 * page from PL0
3575 */
3576- retcode = mm->context.sigpage + signal_return_offset +
3577- (idx << 2) + thumb;
3578+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3579 } else
3580 #endif
3581 {
3582@@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3583 } while (thread_flags & _TIF_WORK_MASK);
3584 return 0;
3585 }
3586-
3587-struct page *get_signal_page(void)
3588-{
3589- unsigned long ptr;
3590- unsigned offset;
3591- struct page *page;
3592- void *addr;
3593-
3594- page = alloc_pages(GFP_KERNEL, 0);
3595-
3596- if (!page)
3597- return NULL;
3598-
3599- addr = page_address(page);
3600-
3601- /* Give the signal return code some randomness */
3602- offset = 0x200 + (get_random_int() & 0x7fc);
3603- signal_return_offset = offset;
3604-
3605- /*
3606- * Copy signal return handlers into the vector page, and
3607- * set sigreturn to be a pointer to these.
3608- */
3609- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3610-
3611- ptr = (unsigned long)addr + offset;
3612- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3613-
3614- return page;
3615-}
3616diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3617index 7c4fada..8581286 100644
3618--- a/arch/arm/kernel/smp.c
3619+++ b/arch/arm/kernel/smp.c
3620@@ -73,7 +73,7 @@ enum ipi_msg_type {
3621
3622 static DECLARE_COMPLETION(cpu_running);
3623
3624-static struct smp_operations smp_ops;
3625+static struct smp_operations smp_ops __read_only;
3626
3627 void __init smp_set_ops(struct smp_operations *ops)
3628 {
3629diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3630index 7a3be1d..b00c7de 100644
3631--- a/arch/arm/kernel/tcm.c
3632+++ b/arch/arm/kernel/tcm.c
3633@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3634 .virtual = ITCM_OFFSET,
3635 .pfn = __phys_to_pfn(ITCM_OFFSET),
3636 .length = 0,
3637- .type = MT_MEMORY_RWX_ITCM,
3638+ .type = MT_MEMORY_RX_ITCM,
3639 }
3640 };
3641
3642@@ -267,7 +267,9 @@ no_dtcm:
3643 start = &__sitcm_text;
3644 end = &__eitcm_text;
3645 ram = &__itcm_start;
3646+ pax_open_kernel();
3647 memcpy(start, ram, itcm_code_sz);
3648+ pax_close_kernel();
3649 pr_debug("CPU ITCM: copied code from %p - %p\n",
3650 start, end);
3651 itcm_present = true;
3652diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3653index abd2fc0..895dbb6 100644
3654--- a/arch/arm/kernel/traps.c
3655+++ b/arch/arm/kernel/traps.c
3656@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3657 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3658 {
3659 #ifdef CONFIG_KALLSYMS
3660- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3661+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3662 #else
3663 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3664 #endif
3665@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3666 static int die_owner = -1;
3667 static unsigned int die_nest_count;
3668
3669+extern void gr_handle_kernel_exploit(void);
3670+
3671 static unsigned long oops_begin(void)
3672 {
3673 int cpu;
3674@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3675 panic("Fatal exception in interrupt");
3676 if (panic_on_oops)
3677 panic("Fatal exception");
3678+
3679+ gr_handle_kernel_exploit();
3680+
3681 if (signr)
3682 do_exit(signr);
3683 }
3684@@ -643,7 +648,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3685 * The user helper at 0xffff0fe0 must be used instead.
3686 * (see entry-armv.S for details)
3687 */
3688+ pax_open_kernel();
3689 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3690+ pax_close_kernel();
3691 }
3692 return 0;
3693
3694@@ -900,7 +907,11 @@ void __init early_trap_init(void *vectors_base)
3695 kuser_init(vectors_base);
3696
3697 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3698- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3699+
3700+#ifndef CONFIG_PAX_MEMORY_UDEREF
3701+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3702+#endif
3703+
3704 #else /* ifndef CONFIG_CPU_V7M */
3705 /*
3706 * on V7-M there is no need to copy the vector table to a dedicated
3707diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3708index 7bcee5c..e2f3249 100644
3709--- a/arch/arm/kernel/vmlinux.lds.S
3710+++ b/arch/arm/kernel/vmlinux.lds.S
3711@@ -8,7 +8,11 @@
3712 #include <asm/thread_info.h>
3713 #include <asm/memory.h>
3714 #include <asm/page.h>
3715-
3716+
3717+#ifdef CONFIG_PAX_KERNEXEC
3718+#include <asm/pgtable.h>
3719+#endif
3720+
3721 #define PROC_INFO \
3722 . = ALIGN(4); \
3723 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3724@@ -34,7 +38,7 @@
3725 #endif
3726
3727 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3728- defined(CONFIG_GENERIC_BUG)
3729+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3730 #define ARM_EXIT_KEEP(x) x
3731 #define ARM_EXIT_DISCARD(x)
3732 #else
3733@@ -90,6 +94,11 @@ SECTIONS
3734 _text = .;
3735 HEAD_TEXT
3736 }
3737+
3738+#ifdef CONFIG_PAX_KERNEXEC
3739+ . = ALIGN(1<<SECTION_SHIFT);
3740+#endif
3741+
3742 .text : { /* Real text segment */
3743 _stext = .; /* Text and read-only data */
3744 __exception_text_start = .;
3745@@ -112,6 +121,8 @@ SECTIONS
3746 ARM_CPU_KEEP(PROC_INFO)
3747 }
3748
3749+ _etext = .; /* End of text section */
3750+
3751 RO_DATA(PAGE_SIZE)
3752
3753 . = ALIGN(4);
3754@@ -142,7 +153,9 @@ SECTIONS
3755
3756 NOTES
3757
3758- _etext = .; /* End of text and rodata section */
3759+#ifdef CONFIG_PAX_KERNEXEC
3760+ . = ALIGN(1<<SECTION_SHIFT);
3761+#endif
3762
3763 #ifndef CONFIG_XIP_KERNEL
3764 . = ALIGN(PAGE_SIZE);
3765@@ -220,6 +233,11 @@ SECTIONS
3766 . = PAGE_OFFSET + TEXT_OFFSET;
3767 #else
3768 __init_end = .;
3769+
3770+#ifdef CONFIG_PAX_KERNEXEC
3771+ . = ALIGN(1<<SECTION_SHIFT);
3772+#endif
3773+
3774 . = ALIGN(THREAD_SIZE);
3775 __data_loc = .;
3776 #endif
3777diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3778index 3c82b37..bd41745 100644
3779--- a/arch/arm/kvm/arm.c
3780+++ b/arch/arm/kvm/arm.c
3781@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3782 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3783
3784 /* The VMID used in the VTTBR */
3785-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3786+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3787 static u8 kvm_next_vmid;
3788 static DEFINE_SPINLOCK(kvm_vmid_lock);
3789
3790@@ -409,7 +409,7 @@ void force_vm_exit(const cpumask_t *mask)
3791 */
3792 static bool need_new_vmid_gen(struct kvm *kvm)
3793 {
3794- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3795+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3796 }
3797
3798 /**
3799@@ -442,7 +442,7 @@ static void update_vttbr(struct kvm *kvm)
3800
3801 /* First user of a new VMID generation? */
3802 if (unlikely(kvm_next_vmid == 0)) {
3803- atomic64_inc(&kvm_vmid_gen);
3804+ atomic64_inc_unchecked(&kvm_vmid_gen);
3805 kvm_next_vmid = 1;
3806
3807 /*
3808@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
3809 kvm_call_hyp(__kvm_flush_vm_context);
3810 }
3811
3812- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3813+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3814 kvm->arch.vmid = kvm_next_vmid;
3815 kvm_next_vmid++;
3816
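
kvm_vmid_gen above becomes an atomic64_unchecked_t because the generation counter is expected to wrap, and the _unchecked variants are exempt from PAX_REFCOUNT's overflow trapping. A sketch of that unchecked behaviour in plain C11 atomics, which carry no overflow instrumentation; bump_generation() is an illustrative name.

#include <stdio.h>
#include <stdatomic.h>

/* Like atomic64_inc_unchecked(): the counter may wrap, and nothing
 * instruments the overflow.  C11 atomics model that directly. */
static atomic_ullong vmid_gen = 1;

static unsigned long long bump_generation(void)
{
	return atomic_fetch_add(&vmid_gen, 1) + 1;
}

int main(void)
{
	printf("generation now %llu\n", bump_generation());
	return 0;
}
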
3817diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3818index 14a0d98..7771a7d 100644
3819--- a/arch/arm/lib/clear_user.S
3820+++ b/arch/arm/lib/clear_user.S
3821@@ -12,14 +12,14 @@
3822
3823 .text
3824
3825-/* Prototype: int __clear_user(void *addr, size_t sz)
3826+/* Prototype: int ___clear_user(void *addr, size_t sz)
3827 * Purpose : clear some user memory
3828 * Params : addr - user memory address to clear
3829 * : sz - number of bytes to clear
3830 * Returns : number of bytes NOT cleared
3831 */
3832 ENTRY(__clear_user_std)
3833-WEAK(__clear_user)
3834+WEAK(___clear_user)
3835 stmfd sp!, {r1, lr}
3836 mov r2, #0
3837 cmp r1, #4
3838@@ -44,7 +44,7 @@ WEAK(__clear_user)
3839 USER( strnebt r2, [r0])
3840 mov r0, #0
3841 ldmfd sp!, {r1, pc}
3842-ENDPROC(__clear_user)
3843+ENDPROC(___clear_user)
3844 ENDPROC(__clear_user_std)
3845
3846 .pushsection .fixup,"ax"
3847diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3848index 66a477a..bee61d3 100644
3849--- a/arch/arm/lib/copy_from_user.S
3850+++ b/arch/arm/lib/copy_from_user.S
3851@@ -16,7 +16,7 @@
3852 /*
3853 * Prototype:
3854 *
3855- * size_t __copy_from_user(void *to, const void *from, size_t n)
3856+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3857 *
3858 * Purpose:
3859 *
3860@@ -84,11 +84,11 @@
3861
3862 .text
3863
3864-ENTRY(__copy_from_user)
3865+ENTRY(___copy_from_user)
3866
3867 #include "copy_template.S"
3868
3869-ENDPROC(__copy_from_user)
3870+ENDPROC(___copy_from_user)
3871
3872 .pushsection .fixup,"ax"
3873 .align 0
3874diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3875index 6ee2f67..d1cce76 100644
3876--- a/arch/arm/lib/copy_page.S
3877+++ b/arch/arm/lib/copy_page.S
3878@@ -10,6 +10,7 @@
3879 * ASM optimised string functions
3880 */
3881 #include <linux/linkage.h>
3882+#include <linux/const.h>
3883 #include <asm/assembler.h>
3884 #include <asm/asm-offsets.h>
3885 #include <asm/cache.h>
3886diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3887index d066df6..df28194 100644
3888--- a/arch/arm/lib/copy_to_user.S
3889+++ b/arch/arm/lib/copy_to_user.S
3890@@ -16,7 +16,7 @@
3891 /*
3892 * Prototype:
3893 *
3894- * size_t __copy_to_user(void *to, const void *from, size_t n)
3895+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3896 *
3897 * Purpose:
3898 *
3899@@ -88,11 +88,11 @@
3900 .text
3901
3902 ENTRY(__copy_to_user_std)
3903-WEAK(__copy_to_user)
3904+WEAK(___copy_to_user)
3905
3906 #include "copy_template.S"
3907
3908-ENDPROC(__copy_to_user)
3909+ENDPROC(___copy_to_user)
3910 ENDPROC(__copy_to_user_std)
3911
3912 .pushsection .fixup,"ax"
3913diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3914index 7d08b43..f7ca7ea 100644
3915--- a/arch/arm/lib/csumpartialcopyuser.S
3916+++ b/arch/arm/lib/csumpartialcopyuser.S
3917@@ -57,8 +57,8 @@
3918 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3919 */
3920
3921-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3922-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3923+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3924+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3925
3926 #include "csumpartialcopygeneric.S"
3927
3928diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3929index 5306de3..aed6d03 100644
3930--- a/arch/arm/lib/delay.c
3931+++ b/arch/arm/lib/delay.c
3932@@ -28,7 +28,7 @@
3933 /*
3934 * Default to the loop-based delay implementation.
3935 */
3936-struct arm_delay_ops arm_delay_ops = {
3937+struct arm_delay_ops arm_delay_ops __read_only = {
3938 .delay = __loop_delay,
3939 .const_udelay = __loop_const_udelay,
3940 .udelay = __loop_udelay,
3941diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3942index 3e58d71..029817c 100644
3943--- a/arch/arm/lib/uaccess_with_memcpy.c
3944+++ b/arch/arm/lib/uaccess_with_memcpy.c
3945@@ -136,7 +136,7 @@ out:
3946 }
3947
3948 unsigned long
3949-__copy_to_user(void __user *to, const void *from, unsigned long n)
3950+___copy_to_user(void __user *to, const void *from, unsigned long n)
3951 {
3952 /*
3953 * This test is stubbed out of the main function above to keep
3954@@ -190,7 +190,7 @@ out:
3955 return n;
3956 }
3957
3958-unsigned long __clear_user(void __user *addr, unsigned long n)
3959+unsigned long ___clear_user(void __user *addr, unsigned long n)
3960 {
3961 /* See rationale for this in __copy_to_user() above. */
3962 if (n < 64)
3963diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3964index f7a07a5..258e1f7 100644
3965--- a/arch/arm/mach-at91/setup.c
3966+++ b/arch/arm/mach-at91/setup.c
3967@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3968
3969 desc->pfn = __phys_to_pfn(base);
3970 desc->length = length;
3971- desc->type = MT_MEMORY_RWX_NONCACHED;
3972+ desc->type = MT_MEMORY_RW_NONCACHED;
3973
3974 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3975 base, length, desc->virtual);
3976diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3977index 255f33a..507b157 100644
3978--- a/arch/arm/mach-kirkwood/common.c
3979+++ b/arch/arm/mach-kirkwood/common.c
3980@@ -157,7 +157,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3981 clk_gate_ops.disable(hw);
3982 }
3983
3984-static struct clk_ops clk_gate_fn_ops;
3985+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3986+{
3987+ return clk_gate_ops.is_enabled(hw);
3988+}
3989+
3990+static struct clk_ops clk_gate_fn_ops = {
3991+ .enable = clk_gate_fn_enable,
3992+ .disable = clk_gate_fn_disable,
3993+ .is_enabled = clk_gate_fn_is_enabled,
3994+};
3995
3996 static struct clk __init *clk_register_gate_fn(struct device *dev,
3997 const char *name,
3998@@ -191,14 +200,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3999 gate_fn->fn_en = fn_en;
4000 gate_fn->fn_dis = fn_dis;
4001
4002- /* ops is the gate ops, but with our enable/disable functions */
4003- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
4004- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
4005- clk_gate_fn_ops = clk_gate_ops;
4006- clk_gate_fn_ops.enable = clk_gate_fn_enable;
4007- clk_gate_fn_ops.disable = clk_gate_fn_disable;
4008- }
4009-
4010 clk = clk_register(dev, &gate_fn->gate.hw);
4011
4012 if (IS_ERR(clk))
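
The kirkwood hunk replaces a lazily patched copy of clk_gate_ops with a fully spelled-out static initializer, so the ops table needs no runtime writes and can live in read-only memory. A sketch of the resulting pattern in plain C, with const standing in for the kernel's __read_only; all names are illustrative.

#include <stdio.h>

/* Every callback is named in a static initializer, so the ops table
 * needs no runtime writes and may be mapped read-only (the kernel's
 * __read_only; plain const here). */
struct clk_ops_sketch {
	int (*enable)(void);
	void (*disable)(void);
	int (*is_enabled)(void);
};

static int my_enable(void)	{ puts("enable"); return 0; }
static void my_disable(void)	{ puts("disable"); }
static int my_is_enabled(void)	{ return 1; }

static const struct clk_ops_sketch gate_ops = {
	.enable		= my_enable,
	.disable	= my_disable,
	.is_enabled	= my_is_enabled,
};

int main(void)
{
	gate_ops.enable();
	printf("enabled: %d\n", gate_ops.is_enabled());
	return 0;
}
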
4013diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
4014index aead77a..a2253fa 100644
4015--- a/arch/arm/mach-omap2/board-n8x0.c
4016+++ b/arch/arm/mach-omap2/board-n8x0.c
4017@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
4018 }
4019 #endif
4020
4021-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
4022+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
4023 .late_init = n8x0_menelaus_late_init,
4024 };
4025
4026diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
4027index 8bc1338..8b28b69 100644
4028--- a/arch/arm/mach-omap2/gpmc.c
4029+++ b/arch/arm/mach-omap2/gpmc.c
4030@@ -151,7 +151,6 @@ struct omap3_gpmc_regs {
4031 };
4032
4033 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
4034-static struct irq_chip gpmc_irq_chip;
4035 static int gpmc_irq_start;
4036
4037 static struct resource gpmc_mem_root;
4038@@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
4039
4040 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
4041
4042+static struct irq_chip gpmc_irq_chip = {
4043+ .name = "gpmc",
4044+ .irq_startup = gpmc_irq_noop_ret,
4045+ .irq_enable = gpmc_irq_enable,
4046+ .irq_disable = gpmc_irq_disable,
4047+ .irq_shutdown = gpmc_irq_noop,
4048+ .irq_ack = gpmc_irq_noop,
4049+ .irq_mask = gpmc_irq_noop,
4050+ .irq_unmask = gpmc_irq_noop,
4051+
4052+};
4053+
4054 static int gpmc_setup_irq(void)
4055 {
4056 int i;
4057@@ -750,15 +761,6 @@ static int gpmc_setup_irq(void)
4058 return gpmc_irq_start;
4059 }
4060
4061- gpmc_irq_chip.name = "gpmc";
4062- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
4063- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
4064- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
4065- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
4066- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
4067- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
4068- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
4069-
4070 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
4071 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
4072
4073diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4074index 4001325..b14e2a0 100644
4075--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4076+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4077@@ -84,7 +84,7 @@ struct cpu_pm_ops {
4078 int (*finish_suspend)(unsigned long cpu_state);
4079 void (*resume)(void);
4080 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
4081-};
4082+} __no_const;
4083
4084 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
4085 static struct powerdomain *mpuss_pd;
4086@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
4087 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
4088 {}
4089
4090-struct cpu_pm_ops omap_pm_ops = {
4091+static struct cpu_pm_ops omap_pm_ops __read_only = {
4092 .finish_suspend = default_finish_suspend,
4093 .resume = dummy_cpu_resume,
4094 .scu_prepare = dummy_scu_prepare,
4095diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
4096index 37843a7..a98df13 100644
4097--- a/arch/arm/mach-omap2/omap-wakeupgen.c
4098+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
4099@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
4100 return NOTIFY_OK;
4101 }
4102
4103-static struct notifier_block __refdata irq_hotplug_notifier = {
4104+static struct notifier_block irq_hotplug_notifier = {
4105 .notifier_call = irq_cpu_hotplug_notify,
4106 };
4107
4108diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
4109index 01ef59d..32ae28a8 100644
4110--- a/arch/arm/mach-omap2/omap_device.c
4111+++ b/arch/arm/mach-omap2/omap_device.c
4112@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
4113 struct platform_device __init *omap_device_build(const char *pdev_name,
4114 int pdev_id,
4115 struct omap_hwmod *oh,
4116- void *pdata, int pdata_len)
4117+ const void *pdata, int pdata_len)
4118 {
4119 struct omap_hwmod *ohs[] = { oh };
4120
4121@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
4122 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
4123 int pdev_id,
4124 struct omap_hwmod **ohs,
4125- int oh_cnt, void *pdata,
4126+ int oh_cnt, const void *pdata,
4127 int pdata_len)
4128 {
4129 int ret = -ENOMEM;
4130diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
4131index 78c02b3..c94109a 100644
4132--- a/arch/arm/mach-omap2/omap_device.h
4133+++ b/arch/arm/mach-omap2/omap_device.h
4134@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
4135 /* Core code interface */
4136
4137 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
4138- struct omap_hwmod *oh, void *pdata,
4139+ struct omap_hwmod *oh, const void *pdata,
4140 int pdata_len);
4141
4142 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
4143 struct omap_hwmod **oh, int oh_cnt,
4144- void *pdata, int pdata_len);
4145+ const void *pdata, int pdata_len);
4146
4147 struct omap_device *omap_device_alloc(struct platform_device *pdev,
4148 struct omap_hwmod **ohs, int oh_cnt);
4149diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
4150index 6c074f3..cd58cb7 100644
4151--- a/arch/arm/mach-omap2/omap_hwmod.c
4152+++ b/arch/arm/mach-omap2/omap_hwmod.c
4153@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
4154 int (*init_clkdm)(struct omap_hwmod *oh);
4155 void (*update_context_lost)(struct omap_hwmod *oh);
4156 int (*get_context_lost)(struct omap_hwmod *oh);
4157-};
4158+} __no_const;
4159
4160 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
4161-static struct omap_hwmod_soc_ops soc_ops;
4162+static struct omap_hwmod_soc_ops soc_ops __read_only;
4163
4164 /* omap_hwmod_list contains all registered struct omap_hwmods */
4165 static LIST_HEAD(omap_hwmod_list);
4166diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
4167index 95fee54..cfa9cf1 100644
4168--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
4169+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
4170@@ -10,6 +10,7 @@
4171
4172 #include <linux/kernel.h>
4173 #include <linux/init.h>
4174+#include <asm/pgtable.h>
4175
4176 #include "powerdomain.h"
4177
4178@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
4179
4180 void __init am43xx_powerdomains_init(void)
4181 {
4182- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
4183+ pax_open_kernel();
4184+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
4185+ pax_close_kernel();
4186 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
4187 pwrdm_register_pwrdms(powerdomains_am43xx);
4188 pwrdm_complete_init();
4189diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
4190index 97d6607..8429d14 100644
4191--- a/arch/arm/mach-omap2/wd_timer.c
4192+++ b/arch/arm/mach-omap2/wd_timer.c
4193@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
4194 struct omap_hwmod *oh;
4195 char *oh_name = "wd_timer2";
4196 char *dev_name = "omap_wdt";
4197- struct omap_wd_timer_platform_data pdata;
4198+ static struct omap_wd_timer_platform_data pdata = {
4199+ .read_reset_sources = prm_read_reset_sources
4200+ };
4201
4202 if (!cpu_class_is_omap2() || of_have_populated_dt())
4203 return 0;
4204@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
4205 return -EINVAL;
4206 }
4207
4208- pdata.read_reset_sources = prm_read_reset_sources;
4209-
4210 pdev = omap_device_build(dev_name, id, oh, &pdata,
4211 sizeof(struct omap_wd_timer_platform_data));
4212 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
4213diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
4214index b82dcae..44ee5b6 100644
4215--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
4216+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
4217@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
4218 bool entered_lp2 = false;
4219
4220 if (tegra_pending_sgi())
4221- ACCESS_ONCE(abort_flag) = true;
4222+ ACCESS_ONCE_RW(abort_flag) = true;
4223
4224 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
4225
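
ACCESS_ONCE_RW above is the writable spelling PaX substitutes where its constification plugin would reject a plain ACCESS_ONCE store. A sketch of what either macro reduces to, assuming the usual volatile-cast definition; __typeof__ is a GCC extension, as in the kernel.

#include <stdio.h>

/* The usual ACCESS_ONCE definition: a volatile-qualified access the
 * compiler may neither cache nor tear. */
#define ACCESS_ONCE_SKETCH(x)	(*(volatile __typeof__(x) *)&(x))

static int abort_flag;

int main(void)
{
	ACCESS_ONCE_SKETCH(abort_flag) = 1;	/* forced single store */
	printf("abort_flag = %d\n", ACCESS_ONCE_SKETCH(abort_flag));
	return 0;
}
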
4226diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
4227index 2dea8b5..6499da2 100644
4228--- a/arch/arm/mach-ux500/setup.h
4229+++ b/arch/arm/mach-ux500/setup.h
4230@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
4231 .type = MT_DEVICE, \
4232 }
4233
4234-#define __MEM_DEV_DESC(x, sz) { \
4235- .virtual = IO_ADDRESS(x), \
4236- .pfn = __phys_to_pfn(x), \
4237- .length = sz, \
4238- .type = MT_MEMORY_RWX, \
4239-}
4240-
4241 extern struct smp_operations ux500_smp_ops;
4242 extern void ux500_cpu_die(unsigned int cpu);
4243
4244diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
4245index c348eae..456a1a4 100644
4246--- a/arch/arm/mm/Kconfig
4247+++ b/arch/arm/mm/Kconfig
4248@@ -446,6 +446,7 @@ config CPU_32v5
4249
4250 config CPU_32v6
4251 bool
4252+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
4253 select TLS_REG_EMUL if !CPU_32v6K && !MMU
4254
4255 config CPU_32v6K
4256@@ -600,6 +601,7 @@ config CPU_CP15_MPU
4257
4258 config CPU_USE_DOMAINS
4259 bool
4260+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
4261 help
4262 This option enables or disables the use of domain switching
4263 via the set_fs() function.
4264@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS
4265 config KUSER_HELPERS
4266 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
4267 default y
4268+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
4269 help
4270 Warning: disabling this option may break user programs.
4271
4272@@ -811,7 +814,7 @@ config KUSER_HELPERS
4273 See Documentation/arm/kernel_user_helpers.txt for details.
4274
4275 However, the fixed address nature of these helpers can be used
4276- by ROP (return orientated programming) authors when creating
4277+ by ROP (Return Oriented Programming) authors when creating
4278 exploits.
4279
4280 If all of the binaries and libraries which run on your platform
4281diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
4282index b8cb1a2..6a5624a 100644
4283--- a/arch/arm/mm/alignment.c
4284+++ b/arch/arm/mm/alignment.c
4285@@ -214,10 +214,12 @@ union offset_union {
4286 #define __get16_unaligned_check(ins,val,addr) \
4287 do { \
4288 unsigned int err = 0, v, a = addr; \
4289+ pax_open_userland(); \
4290 __get8_unaligned_check(ins,v,a,err); \
4291 val = v << ((BE) ? 8 : 0); \
4292 __get8_unaligned_check(ins,v,a,err); \
4293 val |= v << ((BE) ? 0 : 8); \
4294+ pax_close_userland(); \
4295 if (err) \
4296 goto fault; \
4297 } while (0)
4298@@ -231,6 +233,7 @@ union offset_union {
4299 #define __get32_unaligned_check(ins,val,addr) \
4300 do { \
4301 unsigned int err = 0, v, a = addr; \
4302+ pax_open_userland(); \
4303 __get8_unaligned_check(ins,v,a,err); \
4304 val = v << ((BE) ? 24 : 0); \
4305 __get8_unaligned_check(ins,v,a,err); \
4306@@ -239,6 +242,7 @@ union offset_union {
4307 val |= v << ((BE) ? 8 : 16); \
4308 __get8_unaligned_check(ins,v,a,err); \
4309 val |= v << ((BE) ? 0 : 24); \
4310+ pax_close_userland(); \
4311 if (err) \
4312 goto fault; \
4313 } while (0)
4314@@ -252,6 +256,7 @@ union offset_union {
4315 #define __put16_unaligned_check(ins,val,addr) \
4316 do { \
4317 unsigned int err = 0, v = val, a = addr; \
4318+ pax_open_userland(); \
4319 __asm__( FIRST_BYTE_16 \
4320 ARM( "1: "ins" %1, [%2], #1\n" ) \
4321 THUMB( "1: "ins" %1, [%2]\n" ) \
4322@@ -271,6 +276,7 @@ union offset_union {
4323 " .popsection\n" \
4324 : "=r" (err), "=&r" (v), "=&r" (a) \
4325 : "0" (err), "1" (v), "2" (a)); \
4326+ pax_close_userland(); \
4327 if (err) \
4328 goto fault; \
4329 } while (0)
4330@@ -284,6 +290,7 @@ union offset_union {
4331 #define __put32_unaligned_check(ins,val,addr) \
4332 do { \
4333 unsigned int err = 0, v = val, a = addr; \
4334+ pax_open_userland(); \
4335 __asm__( FIRST_BYTE_32 \
4336 ARM( "1: "ins" %1, [%2], #1\n" ) \
4337 THUMB( "1: "ins" %1, [%2]\n" ) \
4338@@ -313,6 +320,7 @@ union offset_union {
4339 " .popsection\n" \
4340 : "=r" (err), "=&r" (v), "=&r" (a) \
4341 : "0" (err), "1" (v), "2" (a)); \
4342+ pax_close_userland(); \
4343 if (err) \
4344 goto fault; \
4345 } while (0)
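
The alignment-fixup macros above assemble halfwords and words from single-byte user loads, now bracketed by pax_open_userland()/pax_close_userland(). A C sketch of the 16-bit case, with the PaX brackets reduced to comments and the endianness selection kept as in the macro.

#include <stdio.h>

/* Two single-byte loads build the halfword, shifted per endianness;
 * the pax_open_userland()/pax_close_userland() brackets from the
 * macro are reduced to comments here. */
static unsigned int get16_unaligned(const unsigned char *p, int big_endian)
{
	unsigned int val;

	/* pax_open_userland(); */
	val = p[0] << (big_endian ? 8 : 0);
	val |= p[1] << (big_endian ? 0 : 8);
	/* pax_close_userland(); */
	return val;
}

int main(void)
{
	unsigned char buf[] = { 0x34, 0x12 };

	printf("LE: 0x%04x  BE: 0x%04x\n",
	       get16_unaligned(buf, 0), get16_unaligned(buf, 1));
	return 0;
}
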
4346diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
4347index 7c3fb41..bfb87d8 100644
4348--- a/arch/arm/mm/cache-l2x0.c
4349+++ b/arch/arm/mm/cache-l2x0.c
4350@@ -41,7 +41,7 @@ struct l2c_init_data {
4351 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
4352 void (*save)(void __iomem *);
4353 struct outer_cache_fns outer_cache;
4354-};
4355+} __do_const;
4356
4357 #define CACHE_LINE_SIZE 32
4358
4359diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
4360index 6eb97b3..ac509f6 100644
4361--- a/arch/arm/mm/context.c
4362+++ b/arch/arm/mm/context.c
4363@@ -43,7 +43,7 @@
4364 #define NUM_USER_ASIDS ASID_FIRST_VERSION
4365
4366 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
4367-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4368+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4369 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4370
4371 static DEFINE_PER_CPU(atomic64_t, active_asids);
4372@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4373 {
4374 static u32 cur_idx = 1;
4375 u64 asid = atomic64_read(&mm->context.id);
4376- u64 generation = atomic64_read(&asid_generation);
4377+ u64 generation = atomic64_read_unchecked(&asid_generation);
4378
4379 if (asid != 0 && is_reserved_asid(asid)) {
4380 /*
4381@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4382 */
4383 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4384 if (asid == NUM_USER_ASIDS) {
4385- generation = atomic64_add_return(ASID_FIRST_VERSION,
4386+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4387 &asid_generation);
4388 flush_context(cpu);
4389 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4390@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4391 cpu_set_reserved_ttbr0();
4392
4393 asid = atomic64_read(&mm->context.id);
4394- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4395+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4396 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4397 goto switch_mm_fastpath;
4398
4399 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4400 /* Check that our ASID belongs to the current generation. */
4401 asid = atomic64_read(&mm->context.id);
4402- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4403+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4404 asid = new_context(mm, cpu);
4405 atomic64_set(&mm->context.id, asid);
4406 }
4407diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4408index eb8830a..5360ce7 100644
4409--- a/arch/arm/mm/fault.c
4410+++ b/arch/arm/mm/fault.c
4411@@ -25,6 +25,7 @@
4412 #include <asm/system_misc.h>
4413 #include <asm/system_info.h>
4414 #include <asm/tlbflush.h>
4415+#include <asm/sections.h>
4416
4417 #include "fault.h"
4418
4419@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4420 if (fixup_exception(regs))
4421 return;
4422
4423+#ifdef CONFIG_PAX_MEMORY_UDEREF
4424+ if (addr < TASK_SIZE) {
4425+ if (current->signal->curr_ip)
4426+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4427+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4428+ else
4429+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4430+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4431+ }
4432+#endif
4433+
4434+#ifdef CONFIG_PAX_KERNEXEC
4435+ if ((fsr & FSR_WRITE) &&
4436+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4437+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4438+ {
4439+ if (current->signal->curr_ip)
4440+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4441+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4442+ else
4443+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4444+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4445+ }
4446+#endif
4447+
4448 /*
4449 * No handler, we'll have to terminate things with extreme prejudice.
4450 */
4451@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4452 }
4453 #endif
4454
4455+#ifdef CONFIG_PAX_PAGEEXEC
4456+ if (fsr & FSR_LNX_PF) {
4457+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4458+ do_group_exit(SIGKILL);
4459+ }
4460+#endif
4461+
4462 tsk->thread.address = addr;
4463 tsk->thread.error_code = fsr;
4464 tsk->thread.trap_no = 14;
4465@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4466 }
4467 #endif /* CONFIG_MMU */
4468
4469+#ifdef CONFIG_PAX_PAGEEXEC
4470+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4471+{
4472+ long i;
4473+
4474+ printk(KERN_ERR "PAX: bytes at PC: ");
4475+ for (i = 0; i < 20; i++) {
4476+ unsigned char c;
4477+ if (get_user(c, (__force unsigned char __user *)pc+i))
4478+ printk(KERN_CONT "?? ");
4479+ else
4480+ printk(KERN_CONT "%02x ", c);
4481+ }
4482+ printk("\n");
4483+
4484+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4485+ for (i = -1; i < 20; i++) {
4486+ unsigned long c;
4487+ if (get_user(c, (__force unsigned long __user *)sp+i))
4488+ printk(KERN_CONT "???????? ");
4489+ else
4490+ printk(KERN_CONT "%08lx ", c);
4491+ }
4492+ printk("\n");
4493+}
4494+#endif
4495+
4496 /*
4497 * First Level Translation Fault Handler
4498 *
4499@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4500 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4501 struct siginfo info;
4502
4503+#ifdef CONFIG_PAX_MEMORY_UDEREF
4504+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4505+ if (current->signal->curr_ip)
4506+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4507+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4508+ else
4509+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4510+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4511+ goto die;
4512+ }
4513+#endif
4514+
4515 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4516 return;
4517
4518+die:
4519 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4520 inf->name, fsr, addr);
4521
4522@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4523 ifsr_info[nr].name = name;
4524 }
4525
4526+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4527+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4528+
4529 asmlinkage void __exception
4530 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4531 {
4532 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4533 struct siginfo info;
4534+ unsigned long pc = instruction_pointer(regs);
4535+
4536+ if (user_mode(regs)) {
4537+ unsigned long sigpage = current->mm->context.sigpage;
4538+
4539+ if (sigpage <= pc && pc < sigpage + 7*4) {
4540+ if (pc < sigpage + 3*4)
4541+ sys_sigreturn(regs);
4542+ else
4543+ sys_rt_sigreturn(regs);
4544+ return;
4545+ }
4546+ if (pc == 0xffff0f60UL) {
4547+ /*
4548+ * PaX: __kuser_cmpxchg64 emulation
4549+ */
4550+ // TODO
4551+ //regs->ARM_pc = regs->ARM_lr;
4552+ //return;
4553+ }
4554+ if (pc == 0xffff0fa0UL) {
4555+ /*
4556+ * PaX: __kuser_memory_barrier emulation
4557+ */
4558+ // dmb(); implied by the exception
4559+ regs->ARM_pc = regs->ARM_lr;
4560+ return;
4561+ }
4562+ if (pc == 0xffff0fc0UL) {
4563+ /*
4564+ * PaX: __kuser_cmpxchg emulation
4565+ */
4566+ // TODO
4567+ //long new;
4568+ //int op;
4569+
4570+ //op = FUTEX_OP_SET << 28;
4571+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4572+ //regs->ARM_r0 = old != new;
4573+ //regs->ARM_pc = regs->ARM_lr;
4574+ //return;
4575+ }
4576+ if (pc == 0xffff0fe0UL) {
4577+ /*
4578+ * PaX: __kuser_get_tls emulation
4579+ */
4580+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4581+ regs->ARM_pc = regs->ARM_lr;
4582+ return;
4583+ }
4584+ }
4585+
4586+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4587+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4588+ if (current->signal->curr_ip)
4589+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4590+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4591+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4592+ else
4593+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4594+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4595+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4596+ goto die;
4597+ }
4598+#endif
4599+
4600+#ifdef CONFIG_PAX_REFCOUNT
4601+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4602+ unsigned int bkpt;
4603+
4604+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4605+ current->thread.error_code = ifsr;
4606+ current->thread.trap_no = 0;
4607+ pax_report_refcount_overflow(regs);
4608+ fixup_exception(regs);
4609+ return;
4610+ }
4611+ }
4612+#endif
4613
4614 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4615 return;
4616
4617+die:
4618 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4619 inf->name, ifsr, addr);
4620
4621diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4622index cf08bdf..772656c 100644
4623--- a/arch/arm/mm/fault.h
4624+++ b/arch/arm/mm/fault.h
4625@@ -3,6 +3,7 @@
4626
4627 /*
4628 * Fault status register encodings. We steal bit 31 for our own purposes.
4629+ * Set when the FSR value is from an instruction fault.
4630 */
4631 #define FSR_LNX_PF (1 << 31)
4632 #define FSR_WRITE (1 << 11)
4633@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4634 }
4635 #endif
4636
4637+/* valid for LPAE and !LPAE */
4638+static inline int is_xn_fault(unsigned int fsr)
4639+{
4640+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4641+}
4642+
4643+static inline int is_domain_fault(unsigned int fsr)
4644+{
4645+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4646+}
4647+
4648 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4649 unsigned long search_exception_table(unsigned long addr);
4650
4651diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4652index 659c75d..6f8c029 100644
4653--- a/arch/arm/mm/init.c
4654+++ b/arch/arm/mm/init.c
4655@@ -31,6 +31,8 @@
4656 #include <asm/setup.h>
4657 #include <asm/tlb.h>
4658 #include <asm/fixmap.h>
4659+#include <asm/system_info.h>
4660+#include <asm/cp15.h>
4661
4662 #include <asm/mach/arch.h>
4663 #include <asm/mach/map.h>
4664@@ -619,7 +621,46 @@ void free_initmem(void)
4665 {
4666 #ifdef CONFIG_HAVE_TCM
4667 extern char __tcm_start, __tcm_end;
4668+#endif
4669
4670+#ifdef CONFIG_PAX_KERNEXEC
4671+ unsigned long addr;
4672+ pgd_t *pgd;
4673+ pud_t *pud;
4674+ pmd_t *pmd;
4675+ int cpu_arch = cpu_architecture();
4676+ unsigned int cr = get_cr();
4677+
4678+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4679+ /* make page tables, etc. before .text NX */
4680+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4681+ pgd = pgd_offset_k(addr);
4682+ pud = pud_offset(pgd, addr);
4683+ pmd = pmd_offset(pud, addr);
4684+ __section_update(pmd, addr, PMD_SECT_XN);
4685+ }
4686+ /* make init NX */
4687+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4688+ pgd = pgd_offset_k(addr);
4689+ pud = pud_offset(pgd, addr);
4690+ pmd = pmd_offset(pud, addr);
4691+ __section_update(pmd, addr, PMD_SECT_XN);
4692+ }
4693+ /* make kernel code/rodata RX */
4694+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4695+ pgd = pgd_offset_k(addr);
4696+ pud = pud_offset(pgd, addr);
4697+ pmd = pmd_offset(pud, addr);
4698+#ifdef CONFIG_ARM_LPAE
4699+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4700+#else
4701+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4702+#endif
4703+ }
4704+ }
4705+#endif
4706+
4707+#ifdef CONFIG_HAVE_TCM
4708 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4709 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4710 #endif
4711diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4712index d1e5ad7..84dcbf2 100644
4713--- a/arch/arm/mm/ioremap.c
4714+++ b/arch/arm/mm/ioremap.c
4715@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4716 unsigned int mtype;
4717
4718 if (cached)
4719- mtype = MT_MEMORY_RWX;
4720+ mtype = MT_MEMORY_RX;
4721 else
4722- mtype = MT_MEMORY_RWX_NONCACHED;
4723+ mtype = MT_MEMORY_RX_NONCACHED;
4724
4725 return __arm_ioremap_caller(phys_addr, size, mtype,
4726 __builtin_return_address(0));
4727diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4728index 5e85ed3..b10a7ed 100644
4729--- a/arch/arm/mm/mmap.c
4730+++ b/arch/arm/mm/mmap.c
4731@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4732 struct vm_area_struct *vma;
4733 int do_align = 0;
4734 int aliasing = cache_is_vipt_aliasing();
4735+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4736 struct vm_unmapped_area_info info;
4737
4738 /*
4739@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4740 if (len > TASK_SIZE)
4741 return -ENOMEM;
4742
4743+#ifdef CONFIG_PAX_RANDMMAP
4744+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4745+#endif
4746+
4747 if (addr) {
4748 if (do_align)
4749 addr = COLOUR_ALIGN(addr, pgoff);
4750@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4751 addr = PAGE_ALIGN(addr);
4752
4753 vma = find_vma(mm, addr);
4754- if (TASK_SIZE - len >= addr &&
4755- (!vma || addr + len <= vma->vm_start))
4756+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4757 return addr;
4758 }
4759
4760@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4761 info.high_limit = TASK_SIZE;
4762 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4763 info.align_offset = pgoff << PAGE_SHIFT;
4764+ info.threadstack_offset = offset;
4765 return vm_unmapped_area(&info);
4766 }
4767
4768@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4769 unsigned long addr = addr0;
4770 int do_align = 0;
4771 int aliasing = cache_is_vipt_aliasing();
4772+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4773 struct vm_unmapped_area_info info;
4774
4775 /*
4776@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4777 return addr;
4778 }
4779
4780+#ifdef CONFIG_PAX_RANDMMAP
4781+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4782+#endif
4783+
4784 /* requesting a specific address */
4785 if (addr) {
4786 if (do_align)
4787@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4788 else
4789 addr = PAGE_ALIGN(addr);
4790 vma = find_vma(mm, addr);
4791- if (TASK_SIZE - len >= addr &&
4792- (!vma || addr + len <= vma->vm_start))
4793+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4794 return addr;
4795 }
4796
4797@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4798 info.high_limit = mm->mmap_base;
4799 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4800 info.align_offset = pgoff << PAGE_SHIFT;
4801+ info.threadstack_offset = offset;
4802 addr = vm_unmapped_area(&info);
4803
4804 /*
4805@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4806 {
4807 unsigned long random_factor = 0UL;
4808
4809+#ifdef CONFIG_PAX_RANDMMAP
4810+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4811+#endif
4812+
4813 /* 8 bits of randomness in 20 address space bits */
4814 if ((current->flags & PF_RANDOMIZE) &&
4815 !(current->personality & ADDR_NO_RANDOMIZE))
4816@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4817
4818 if (mmap_is_legacy()) {
4819 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4820+
4821+#ifdef CONFIG_PAX_RANDMMAP
4822+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4823+ mm->mmap_base += mm->delta_mmap;
4824+#endif
4825+
4826 mm->get_unmapped_area = arch_get_unmapped_area;
4827 } else {
4828 mm->mmap_base = mmap_base(random_factor);
4829+
4830+#ifdef CONFIG_PAX_RANDMMAP
4831+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4832+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4833+#endif
4834+
4835 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4836 }
4837 }
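
The mmap.c hunk layers PAX_RANDMMAP's per-mm delta_mmap on top of the stock 8-bit, page-granular randomization of the legacy mmap base. A sketch of that arithmetic, assuming illustrative 32-bit ARM constants for TASK_SIZE and TASK_UNMAPPED_BASE, a made-up delta width, and rand() in place of get_random_int().

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Illustrative 32-bit ARM constants; both are assumptions here. */
#define TASK_SIZE		0xBF000000UL
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)
#define PAGE_SHIFT		12

int main(void)
{
	unsigned long random_factor, delta_mmap, mmap_base;

	srand((unsigned int)time(NULL));
	/* Stock kernel: 8 bits of page-granular randomness. */
	random_factor = ((unsigned long)rand() % (1UL << 8)) << PAGE_SHIFT;
	/* PAX_RANDMMAP: a further per-mm delta on top (width made up). */
	delta_mmap = ((unsigned long)rand() % (1UL << 8)) << PAGE_SHIFT;
	mmap_base = TASK_UNMAPPED_BASE + random_factor + delta_mmap;
	printf("legacy mmap_base = 0x%08lx\n", mmap_base);
	return 0;
}
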
4838diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4839index 6e3ba8d..9cbb4d7 100644
4840--- a/arch/arm/mm/mmu.c
4841+++ b/arch/arm/mm/mmu.c
4842@@ -40,6 +40,22 @@
4843 #include "mm.h"
4844 #include "tcm.h"
4845
4846+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4847+void modify_domain(unsigned int dom, unsigned int type)
4848+{
4849+ struct thread_info *thread = current_thread_info();
4850+ unsigned int domain = thread->cpu_domain;
4851+ /*
4852+ * DOMAIN_MANAGER might be defined to some other value,
4853+ * use the arch-defined constant
4854+ */
4855+ domain &= ~domain_val(dom, 3);
4856+ thread->cpu_domain = domain | domain_val(dom, type);
4857+ set_domain(thread->cpu_domain);
4858+}
4859+EXPORT_SYMBOL(modify_domain);
4860+#endif
4861+
4862 /*
4863 * empty_zero_page is a special page that is used for
4864 * zero-initialized data and COW.
4865@@ -239,7 +255,15 @@ __setup("noalign", noalign_setup);
4866 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4867 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4868
4869-static struct mem_type mem_types[] = {
4870+#ifdef CONFIG_PAX_KERNEXEC
4871+#define L_PTE_KERNEXEC L_PTE_RDONLY
4872+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4873+#else
4874+#define L_PTE_KERNEXEC L_PTE_DIRTY
4875+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4876+#endif
4877+
4878+static struct mem_type mem_types[] __read_only = {
4879 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4880 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4881 L_PTE_SHARED,
4882@@ -268,19 +292,19 @@ static struct mem_type mem_types[] = {
4883 .prot_sect = PROT_SECT_DEVICE,
4884 .domain = DOMAIN_IO,
4885 },
4886- [MT_UNCACHED] = {
4887+ [MT_UNCACHED_RW] = {
4888 .prot_pte = PROT_PTE_DEVICE,
4889 .prot_l1 = PMD_TYPE_TABLE,
4890 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4891 .domain = DOMAIN_IO,
4892 },
4893- [MT_CACHECLEAN] = {
4894- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4895+ [MT_CACHECLEAN_RO] = {
4896+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4897 .domain = DOMAIN_KERNEL,
4898 },
4899 #ifndef CONFIG_ARM_LPAE
4900- [MT_MINICLEAN] = {
4901- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4902+ [MT_MINICLEAN_RO] = {
4903+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4904 .domain = DOMAIN_KERNEL,
4905 },
4906 #endif
4907@@ -288,15 +312,15 @@ static struct mem_type mem_types[] = {
4908 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4909 L_PTE_RDONLY,
4910 .prot_l1 = PMD_TYPE_TABLE,
4911- .domain = DOMAIN_USER,
4912+ .domain = DOMAIN_VECTORS,
4913 },
4914 [MT_HIGH_VECTORS] = {
4915 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4916 L_PTE_USER | L_PTE_RDONLY,
4917 .prot_l1 = PMD_TYPE_TABLE,
4918- .domain = DOMAIN_USER,
4919+ .domain = DOMAIN_VECTORS,
4920 },
4921- [MT_MEMORY_RWX] = {
4922+ [__MT_MEMORY_RWX] = {
4923 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4924 .prot_l1 = PMD_TYPE_TABLE,
4925 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4926@@ -309,17 +333,30 @@ static struct mem_type mem_types[] = {
4927 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4928 .domain = DOMAIN_KERNEL,
4929 },
4930- [MT_ROM] = {
4931- .prot_sect = PMD_TYPE_SECT,
4932+ [MT_MEMORY_RX] = {
4933+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4934+ .prot_l1 = PMD_TYPE_TABLE,
4935+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4936+ .domain = DOMAIN_KERNEL,
4937+ },
4938+ [MT_ROM_RX] = {
4939+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4940 .domain = DOMAIN_KERNEL,
4941 },
4942- [MT_MEMORY_RWX_NONCACHED] = {
4943+ [MT_MEMORY_RW_NONCACHED] = {
4944 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4945 L_PTE_MT_BUFFERABLE,
4946 .prot_l1 = PMD_TYPE_TABLE,
4947 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4948 .domain = DOMAIN_KERNEL,
4949 },
4950+ [MT_MEMORY_RX_NONCACHED] = {
4951+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4952+ L_PTE_MT_BUFFERABLE,
4953+ .prot_l1 = PMD_TYPE_TABLE,
4954+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4955+ .domain = DOMAIN_KERNEL,
4956+ },
4957 [MT_MEMORY_RW_DTCM] = {
4958 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4959 L_PTE_XN,
4960@@ -327,9 +364,10 @@ static struct mem_type mem_types[] = {
4961 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4962 .domain = DOMAIN_KERNEL,
4963 },
4964- [MT_MEMORY_RWX_ITCM] = {
4965- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4966+ [MT_MEMORY_RX_ITCM] = {
4967+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4968 .prot_l1 = PMD_TYPE_TABLE,
4969+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4970 .domain = DOMAIN_KERNEL,
4971 },
4972 [MT_MEMORY_RW_SO] = {
4973@@ -547,9 +585,14 @@ static void __init build_mem_type_table(void)
4974 * Mark cache clean areas and XIP ROM read only
4975 * from SVC mode and no access from userspace.
4976 */
4977- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4978- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4979- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4980+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4981+#ifdef CONFIG_PAX_KERNEXEC
4982+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4983+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4984+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4985+#endif
4986+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4987+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4988 #endif
4989
4990 /*
4991@@ -566,13 +609,17 @@ static void __init build_mem_type_table(void)
4992 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4993 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4994 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4995- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4996- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4997+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4998+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4999 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
5000 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
5001+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
5002+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
5003 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
5004- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
5005- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
5006+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
5007+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
5008+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
5009+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
5010 }
5011 }
5012
5013@@ -583,15 +630,20 @@ static void __init build_mem_type_table(void)
5014 if (cpu_arch >= CPU_ARCH_ARMv6) {
5015 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
5016 /* Non-cacheable Normal is XCB = 001 */
5017- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
5018+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
5019+ PMD_SECT_BUFFERED;
5020+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
5021 PMD_SECT_BUFFERED;
5022 } else {
5023 /* For both ARMv6 and non-TEX-remapping ARMv7 */
5024- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
5025+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
5026+ PMD_SECT_TEX(1);
5027+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
5028 PMD_SECT_TEX(1);
5029 }
5030 } else {
5031- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5032+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5033+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5034 }
5035
5036 #ifdef CONFIG_ARM_LPAE
5037@@ -607,6 +659,8 @@ static void __init build_mem_type_table(void)
5038 vecs_pgprot |= PTE_EXT_AF;
5039 #endif
5040
5041+ user_pgprot |= __supported_pte_mask;
5042+
5043 for (i = 0; i < 16; i++) {
5044 pteval_t v = pgprot_val(protection_map[i]);
5045 protection_map[i] = __pgprot(v | user_pgprot);
5046@@ -624,21 +678,24 @@ static void __init build_mem_type_table(void)
5047
5048 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
5049 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
5050- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
5051- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
5052+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
5053+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
5054 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
5055 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
5056+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
5057+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
5058 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
5059- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
5060- mem_types[MT_ROM].prot_sect |= cp->pmd;
5061+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
5062+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
5063+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
5064
5065 switch (cp->pmd) {
5066 case PMD_SECT_WT:
5067- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
5068+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
5069 break;
5070 case PMD_SECT_WB:
5071 case PMD_SECT_WBWA:
5072- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
5073+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
5074 break;
5075 }
5076 pr_info("Memory policy: %sData cache %s\n",
5077@@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md)
5078 return;
5079 }
5080
5081- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
5082+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
5083 md->virtual >= PAGE_OFFSET &&
5084 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
5085 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
5086@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
5087 * called function. This means you can't use any function or debugging
5088 * method which may touch any device, otherwise the kernel _will_ crash.
5089 */
5090+
5091+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
5092+
5093 static void __init devicemaps_init(const struct machine_desc *mdesc)
5094 {
5095 struct map_desc map;
5096 unsigned long addr;
5097- void *vectors;
5098
5099- /*
5100- * Allocate the vector page early.
5101- */
5102- vectors = early_alloc(PAGE_SIZE * 2);
5103-
5104- early_trap_init(vectors);
5105+ early_trap_init(&vectors);
5106
5107 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
5108 pmd_clear(pmd_off_k(addr));
5109@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5110 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
5111 map.virtual = MODULES_VADDR;
5112 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
5113- map.type = MT_ROM;
5114+ map.type = MT_ROM_RX;
5115 create_mapping(&map);
5116 #endif
5117
5118@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5119 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
5120 map.virtual = FLUSH_BASE;
5121 map.length = SZ_1M;
5122- map.type = MT_CACHECLEAN;
5123+ map.type = MT_CACHECLEAN_RO;
5124 create_mapping(&map);
5125 #endif
5126 #ifdef FLUSH_BASE_MINICACHE
5127 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
5128 map.virtual = FLUSH_BASE_MINICACHE;
5129 map.length = SZ_1M;
5130- map.type = MT_MINICLEAN;
5131+ map.type = MT_MINICLEAN_RO;
5132 create_mapping(&map);
5133 #endif
5134
5135@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5136 * location (0xffff0000). If we aren't using high-vectors, also
5137 * create a mapping at the low-vectors virtual address.
5138 */
5139- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
5140+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
5141 map.virtual = 0xffff0000;
5142 map.length = PAGE_SIZE;
5143 #ifdef CONFIG_KUSER_HELPERS
5144@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
5145 static void __init map_lowmem(void)
5146 {
5147 struct memblock_region *reg;
5148+#ifndef CONFIG_PAX_KERNEXEC
5149 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
5150 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
5151+#endif
5152
5153 /* Map all the lowmem memory banks. */
5154 for_each_memblock(memory, reg) {
5155@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
5156 if (start >= end)
5157 break;
5158
5159+#ifdef CONFIG_PAX_KERNEXEC
5160+ map.pfn = __phys_to_pfn(start);
5161+ map.virtual = __phys_to_virt(start);
5162+ map.length = end - start;
5163+
5164+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
5165+ struct map_desc kernel;
5166+ struct map_desc initmap;
5167+
5168+ /* when freeing initmem we will make this RW */
5169+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
5170+ initmap.virtual = (unsigned long)__init_begin;
5171+ initmap.length = _sdata - __init_begin;
5172+ initmap.type = __MT_MEMORY_RWX;
5173+ create_mapping(&initmap);
5174+
5175+ /* when freeing initmem we will make this RX */
5176+ kernel.pfn = __phys_to_pfn(__pa(_stext));
5177+ kernel.virtual = (unsigned long)_stext;
5178+ kernel.length = __init_begin - _stext;
5179+ kernel.type = __MT_MEMORY_RWX;
5180+ create_mapping(&kernel);
5181+
5182+ if (map.virtual < (unsigned long)_stext) {
5183+ map.length = (unsigned long)_stext - map.virtual;
5184+ map.type = __MT_MEMORY_RWX;
5185+ create_mapping(&map);
5186+ }
5187+
5188+ map.pfn = __phys_to_pfn(__pa(_sdata));
5189+ map.virtual = (unsigned long)_sdata;
5190+ map.length = end - __pa(_sdata);
5191+ }
5192+
5193+ map.type = MT_MEMORY_RW;
5194+ create_mapping(&map);
5195+#else
5196 if (end < kernel_x_start || start >= kernel_x_end) {
5197 map.pfn = __phys_to_pfn(start);
5198 map.virtual = __phys_to_virt(start);
5199 map.length = end - start;
5200- map.type = MT_MEMORY_RWX;
5201+ map.type = __MT_MEMORY_RWX;
5202
5203 create_mapping(&map);
5204 } else {
5205@@ -1370,7 +1463,7 @@ static void __init map_lowmem(void)
5206 map.pfn = __phys_to_pfn(kernel_x_start);
5207 map.virtual = __phys_to_virt(kernel_x_start);
5208 map.length = kernel_x_end - kernel_x_start;
5209- map.type = MT_MEMORY_RWX;
5210+ map.type = __MT_MEMORY_RWX;
5211
5212 create_mapping(&map);
5213
5214@@ -1383,6 +1476,7 @@ static void __init map_lowmem(void)
5215 create_mapping(&map);
5216 }
5217 }
5218+#endif
5219 }
5220 }
5221
5222diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
5223index 5b217f4..c23f40e 100644
5224--- a/arch/arm/plat-iop/setup.c
5225+++ b/arch/arm/plat-iop/setup.c
5226@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
5227 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
5228 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
5229 .length = IOP3XX_PERIPHERAL_SIZE,
5230- .type = MT_UNCACHED,
5231+ .type = MT_UNCACHED_RW,
5232 },
5233 };
5234
5235diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
5236index a5bc92d..0bb4730 100644
5237--- a/arch/arm/plat-omap/sram.c
5238+++ b/arch/arm/plat-omap/sram.c
5239@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
5240 * Looks like we need to preserve some bootloader code at the
5241 * beginning of SRAM for jumping to flash for reboot to work...
5242 */
5243+ pax_open_kernel();
5244 memset_io(omap_sram_base + omap_sram_skip, 0,
5245 omap_sram_size - omap_sram_skip);
5246+ pax_close_kernel();
5247 }
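
With KERNEXEC active this SRAM window is mapped without write permission, so
the memset_io() has to sit between pax_open_kernel() and pax_close_kernel(),
which briefly lift the kernel write protection on the current CPU. A userspace
analogue of the same bracketing discipline, using mprotect() (a sketch of the
idea only, not the kernel mechanism):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "boot code");
	mprotect(p, pagesz, PROT_READ);               /* region is now RO     */

	mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()    */
	memset(p, 0, pagesz);                         /* the sanctioned write */
	mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel()   */

	printf("first byte after clear: %d\n", p[0]);
	return 0;
}
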
5248diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
5249index ce6d763..cfea917 100644
5250--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
5251+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
5252@@ -47,7 +47,7 @@ struct samsung_dma_ops {
5253 int (*started)(unsigned ch);
5254 int (*flush)(unsigned ch);
5255 int (*stop)(unsigned ch);
5256-};
5257+} __no_const;
5258
5259 extern void *samsung_dmadev_get_ops(void);
5260 extern void *s3c_dma_get_ops(void);
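
samsung_dma_ops is a table of function pointers that is only filled in at
runtime (DMA engine back end versus legacy S3C DMA), so PaX's constify plugin
must not force it const; __no_const is the opt-out. A sketch of the
distinction, with __no_const stubbed out as an empty macro and the two request
handlers invented for illustration:

#include <stdio.h>

#define __no_const   /* real definition is a gcc plugin attribute */

struct dma_ops {
	int (*request)(unsigned ch);
} __no_const;

static int dmadev_request(unsigned ch) { return (int)ch; }
static int s3c_request(unsigned ch)    { return (int)ch + 100; }

static struct dma_ops ops;  /* filled in after probing, so it cannot be const */

int main(void)
{
	int have_dmadev = 1;    /* pretend probe result */
	ops.request = have_dmadev ? dmadev_request : s3c_request;
	printf("%d\n", ops.request(3));
	return 0;
}
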
5261diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
5262index 6389d60..b5d3bdd 100644
5263--- a/arch/arm64/include/asm/barrier.h
5264+++ b/arch/arm64/include/asm/barrier.h
5265@@ -41,7 +41,7 @@
5266 do { \
5267 compiletime_assert_atomic_type(*p); \
5268 barrier(); \
5269- ACCESS_ONCE(*p) = (v); \
5270+ ACCESS_ONCE_RW(*p) = (v); \
5271 } while (0)
5272
5273 #define smp_load_acquire(p) \
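
ACCESS_ONCE_RW() is PaX's writable counterpart to ACCESS_ONCE(): the plain
form is routed through a const-qualified volatile lvalue, so a stray store
through it fails to compile, and deliberate stores such as the one in the
macro patched above must say so explicitly. A sketch using approximations of
the PaX definitions (they may not match the tree verbatim):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int flag = 0;
	ACCESS_ONCE_RW(flag) = 1;           /* intended store: compiles      */
	printf("%d\n", ACCESS_ONCE(flag));  /* read: compiles                */
	/* ACCESS_ONCE(flag) = 2; */        /* store through const: rejected */
	return 0;
}
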
5274diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
5275index 3bf8f4e..5dd5491 100644
5276--- a/arch/arm64/include/asm/uaccess.h
5277+++ b/arch/arm64/include/asm/uaccess.h
5278@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
5279 flag; \
5280 })
5281
5282+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5283 #define access_ok(type, addr, size) __range_ok(addr, size)
5284 #define user_addr_max get_fs
5285
5286diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
5287index c3a58a1..78fbf54 100644
5288--- a/arch/avr32/include/asm/cache.h
5289+++ b/arch/avr32/include/asm/cache.h
5290@@ -1,8 +1,10 @@
5291 #ifndef __ASM_AVR32_CACHE_H
5292 #define __ASM_AVR32_CACHE_H
5293
5294+#include <linux/const.h>
5295+
5296 #define L1_CACHE_SHIFT 5
5297-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5298+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5299
5300 /*
5301 * Memory returned by kmalloc() may be used for DMA, so we must make
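
This cache.h change, repeated for several architectures below, replaces a bare
(1 << L1_CACHE_SHIFT) with (_AC(1,UL) << L1_CACHE_SHIFT) so the constant is
unsigned long in C yet still assembles when the header is pulled into .S
files. How _AC() from include/uapi/linux/const.h makes that work:

#include <stdio.h>

/* In assembly the suffix is dropped; in C it is token-pasted on. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* In C this is (1UL << 5); an assembler would see a plain (1 << 5). */
	printf("L1_CACHE_BYTES = %lu, sizeof = %zu\n",
	       L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
	return 0;
}
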
5302diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
5303index d232888..87c8df1 100644
5304--- a/arch/avr32/include/asm/elf.h
5305+++ b/arch/avr32/include/asm/elf.h
5306@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
5307 the loader. We need to make sure that it is out of the way of the program
5308 that it will "exec", and that there is sufficient room for the brk. */
5309
5310-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
5311+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5312
5313+#ifdef CONFIG_PAX_ASLR
5314+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
5315+
5316+#define PAX_DELTA_MMAP_LEN 15
5317+#define PAX_DELTA_STACK_LEN 15
5318+#endif
5319
5320 /* This yields a mask that user programs can use to figure out what
5321 instruction set this CPU supports. This could be done in user space,
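
The PAX_DELTA_*_LEN values give the number of random bits PaX mixes into the
mmap and stack bases, counted in page-sized units, so 15 bits with 4 KiB pages
spans 128 MiB. A simplified model of the arithmetic; the kernel draws from its
own entropy pool rather than rand(), and the constants here merely mirror the
defines above:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT         12   /* 4 KiB pages */
#define PAX_DELTA_MMAP_LEN 15

int main(void)
{
	unsigned long mask = (1UL << PAX_DELTA_MMAP_LEN) - 1;
	unsigned long delta;

	srand((unsigned)time(NULL));
	delta = ((unsigned long)rand() & mask) << PAGE_SHIFT;

	printf("randomisation span: %lu MiB\n",
	       (1UL << (PAX_DELTA_MMAP_LEN + PAGE_SHIFT)) >> 20);
	printf("this run's mmap shift: %#lx\n", delta);
	return 0;
}
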
5322diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
5323index 479330b..53717a8 100644
5324--- a/arch/avr32/include/asm/kmap_types.h
5325+++ b/arch/avr32/include/asm/kmap_types.h
5326@@ -2,9 +2,9 @@
5327 #define __ASM_AVR32_KMAP_TYPES_H
5328
5329 #ifdef CONFIG_DEBUG_HIGHMEM
5330-# define KM_TYPE_NR 29
5331+# define KM_TYPE_NR 30
5332 #else
5333-# define KM_TYPE_NR 14
5334+# define KM_TYPE_NR 15
5335 #endif
5336
5337 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5338diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5339index 0eca933..eb78c7b 100644
5340--- a/arch/avr32/mm/fault.c
5341+++ b/arch/avr32/mm/fault.c
5342@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5343
5344 int exception_trace = 1;
5345
5346+#ifdef CONFIG_PAX_PAGEEXEC
5347+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5348+{
5349+ unsigned long i;
5350+
5351+ printk(KERN_ERR "PAX: bytes at PC: ");
5352+ for (i = 0; i < 20; i++) {
5353+ unsigned char c;
5354+ if (get_user(c, (unsigned char *)pc+i))
5355+ printk(KERN_CONT "???????? ");
5356+ else
5357+ printk(KERN_CONT "%02x ", c);
5358+ }
5359+ printk("\n");
5360+}
5361+#endif
5362+
5363 /*
5364 * This routine handles page faults. It determines the address and the
5365 * problem, and then passes it off to one of the appropriate routines.
5366@@ -176,6 +193,16 @@ bad_area:
5367 up_read(&mm->mmap_sem);
5368
5369 if (user_mode(regs)) {
5370+
5371+#ifdef CONFIG_PAX_PAGEEXEC
5372+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5373+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5374+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5375+ do_group_exit(SIGKILL);
5376+ }
5377+ }
5378+#endif
5379+
5380 if (exception_trace && printk_ratelimit())
5381 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5382 "sp %08lx ecr %lu\n",
5383diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5384index 568885a..f8008df 100644
5385--- a/arch/blackfin/include/asm/cache.h
5386+++ b/arch/blackfin/include/asm/cache.h
5387@@ -7,6 +7,7 @@
5388 #ifndef __ARCH_BLACKFIN_CACHE_H
5389 #define __ARCH_BLACKFIN_CACHE_H
5390
5391+#include <linux/const.h>
5392 #include <linux/linkage.h> /* for asmlinkage */
5393
5394 /*
5395@@ -14,7 +15,7 @@
5396 * Blackfin loads 32 bytes for cache
5397 */
5398 #define L1_CACHE_SHIFT 5
5399-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5400+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5401 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5402
5403 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5404diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5405index aea2718..3639a60 100644
5406--- a/arch/cris/include/arch-v10/arch/cache.h
5407+++ b/arch/cris/include/arch-v10/arch/cache.h
5408@@ -1,8 +1,9 @@
5409 #ifndef _ASM_ARCH_CACHE_H
5410 #define _ASM_ARCH_CACHE_H
5411
5412+#include <linux/const.h>
5413 /* Etrax 100LX have 32-byte cache-lines. */
5414-#define L1_CACHE_BYTES 32
5415 #define L1_CACHE_SHIFT 5
5416+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5417
5418 #endif /* _ASM_ARCH_CACHE_H */
5419diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5420index 7caf25d..ee65ac5 100644
5421--- a/arch/cris/include/arch-v32/arch/cache.h
5422+++ b/arch/cris/include/arch-v32/arch/cache.h
5423@@ -1,11 +1,12 @@
5424 #ifndef _ASM_CRIS_ARCH_CACHE_H
5425 #define _ASM_CRIS_ARCH_CACHE_H
5426
5427+#include <linux/const.h>
5428 #include <arch/hwregs/dma.h>
5429
5430 /* A cache-line is 32 bytes. */
5431-#define L1_CACHE_BYTES 32
5432 #define L1_CACHE_SHIFT 5
5433+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5434
5435 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5436
5437diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5438index f6c3a16..cd422a4 100644
5439--- a/arch/frv/include/asm/atomic.h
5440+++ b/arch/frv/include/asm/atomic.h
5441@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5442 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5443 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5444
5445+#define atomic64_read_unchecked(v) atomic64_read(v)
5446+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5447+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5448+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5449+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5450+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5451+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5452+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5453+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5454+
5455 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5456 {
5457 int c, old;
5458diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5459index 2797163..c2a401d 100644
5460--- a/arch/frv/include/asm/cache.h
5461+++ b/arch/frv/include/asm/cache.h
5462@@ -12,10 +12,11 @@
5463 #ifndef __ASM_CACHE_H
5464 #define __ASM_CACHE_H
5465
5466+#include <linux/const.h>
5467
5468 /* bytes per L1 cache line */
5469 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5470-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5471+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5472
5473 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5474 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5475diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5476index 43901f2..0d8b865 100644
5477--- a/arch/frv/include/asm/kmap_types.h
5478+++ b/arch/frv/include/asm/kmap_types.h
5479@@ -2,6 +2,6 @@
5480 #ifndef _ASM_KMAP_TYPES_H
5481 #define _ASM_KMAP_TYPES_H
5482
5483-#define KM_TYPE_NR 17
5484+#define KM_TYPE_NR 18
5485
5486 #endif
5487diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5488index 836f147..4cf23f5 100644
5489--- a/arch/frv/mm/elf-fdpic.c
5490+++ b/arch/frv/mm/elf-fdpic.c
5491@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5492 {
5493 struct vm_area_struct *vma;
5494 struct vm_unmapped_area_info info;
5495+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5496
5497 if (len > TASK_SIZE)
5498 return -ENOMEM;
5499@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5500 if (addr) {
5501 addr = PAGE_ALIGN(addr);
5502 vma = find_vma(current->mm, addr);
5503- if (TASK_SIZE - len >= addr &&
5504- (!vma || addr + len <= vma->vm_start))
5505+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5506 goto success;
5507 }
5508
5509@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5510 info.high_limit = (current->mm->start_stack - 0x00200000);
5511 info.align_mask = 0;
5512 info.align_offset = 0;
5513+ info.threadstack_offset = offset;
5514 addr = vm_unmapped_area(&info);
5515 if (!(addr & ~PAGE_MASK))
5516 goto success;
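
check_heap_stack_gap() tightens the old "addr + len <= vma->vm_start" test: a
candidate range must also clear a guard gap, plus the per-mm random offset
computed by gr_rand_threadstack_offset() above, before the next mapping, so
heap allocations cannot land flush against the stack. A toy model of the core
comparison, heavily simplified from the grsecurity helper:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

/* Accept [addr, addr+len) only if it also leaves a guard gap below the
 * next mapping, instead of merely not overlapping it. */
static bool check_heap_stack_gap(const struct vma *next, unsigned long addr,
				 unsigned long len, unsigned long gap)
{
	if (!next)
		return true;
	return addr + len + gap <= next->vm_start;
}

int main(void)
{
	struct vma stack = { .vm_start = 0xbf800000UL, .vm_end = 0xc0000000UL };

	/* fits with room to spare */
	printf("%d\n", check_heap_stack_gap(&stack, 0xbf000000UL,
					    0x400000UL, 0x10000UL));
	/* would butt right up against the stack: refused */
	printf("%d\n", check_heap_stack_gap(&stack, 0xbf000000UL,
					    0x800000UL, 0x10000UL));
	return 0;
}
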
5517diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5518index f4ca594..adc72fd6 100644
5519--- a/arch/hexagon/include/asm/cache.h
5520+++ b/arch/hexagon/include/asm/cache.h
5521@@ -21,9 +21,11 @@
5522 #ifndef __ASM_CACHE_H
5523 #define __ASM_CACHE_H
5524
5525+#include <linux/const.h>
5526+
5527 /* Bytes per L1 cache line */
5528-#define L1_CACHE_SHIFT (5)
5529-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5530+#define L1_CACHE_SHIFT 5
5531+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5532
5533 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5534 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5535diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5536index 2f3abcf..e63c7fa 100644
5537--- a/arch/ia64/Kconfig
5538+++ b/arch/ia64/Kconfig
5539@@ -547,6 +547,7 @@ source "drivers/sn/Kconfig"
5540 config KEXEC
5541 bool "kexec system call"
5542 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5543+ depends on !GRKERNSEC_KMEM
5544 help
5545 kexec is a system call that implements the ability to shutdown your
5546 current kernel, and to start another kernel. It is like a reboot
5547diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5548index 0f8bf48..40ea950 100644
5549--- a/arch/ia64/include/asm/atomic.h
5550+++ b/arch/ia64/include/asm/atomic.h
5551@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5552 #define atomic64_inc(v) atomic64_add(1, (v))
5553 #define atomic64_dec(v) atomic64_sub(1, (v))
5554
5555+#define atomic64_read_unchecked(v) atomic64_read(v)
5556+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5557+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5558+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5559+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5560+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5561+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5562+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5563+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5564+
5565 #endif /* _ASM_IA64_ATOMIC_H */
5566diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5567index a48957c..e097b56 100644
5568--- a/arch/ia64/include/asm/barrier.h
5569+++ b/arch/ia64/include/asm/barrier.h
5570@@ -67,7 +67,7 @@
5571 do { \
5572 compiletime_assert_atomic_type(*p); \
5573 barrier(); \
5574- ACCESS_ONCE(*p) = (v); \
5575+ ACCESS_ONCE_RW(*p) = (v); \
5576 } while (0)
5577
5578 #define smp_load_acquire(p) \
5579diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5580index 988254a..e1ee885 100644
5581--- a/arch/ia64/include/asm/cache.h
5582+++ b/arch/ia64/include/asm/cache.h
5583@@ -1,6 +1,7 @@
5584 #ifndef _ASM_IA64_CACHE_H
5585 #define _ASM_IA64_CACHE_H
5586
5587+#include <linux/const.h>
5588
5589 /*
5590 * Copyright (C) 1998-2000 Hewlett-Packard Co
5591@@ -9,7 +10,7 @@
5592
5593 /* Bytes per L1 (data) cache line. */
5594 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5595-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5596+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5597
5598 #ifdef CONFIG_SMP
5599 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5600diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5601index 5a83c5c..4d7f553 100644
5602--- a/arch/ia64/include/asm/elf.h
5603+++ b/arch/ia64/include/asm/elf.h
5604@@ -42,6 +42,13 @@
5605 */
5606 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5607
5608+#ifdef CONFIG_PAX_ASLR
5609+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5610+
5611+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5612+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5613+#endif
5614+
5615 #define PT_IA_64_UNWIND 0x70000001
5616
5617 /* IA-64 relocations: */
5618diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5619index 5767cdf..7462574 100644
5620--- a/arch/ia64/include/asm/pgalloc.h
5621+++ b/arch/ia64/include/asm/pgalloc.h
5622@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5623 pgd_val(*pgd_entry) = __pa(pud);
5624 }
5625
5626+static inline void
5627+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5628+{
5629+ pgd_populate(mm, pgd_entry, pud);
5630+}
5631+
5632 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5633 {
5634 return quicklist_alloc(0, GFP_KERNEL, NULL);
5635@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5636 pud_val(*pud_entry) = __pa(pmd);
5637 }
5638
5639+static inline void
5640+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5641+{
5642+ pud_populate(mm, pud_entry, pmd);
5643+}
5644+
5645 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5646 {
5647 return quicklist_alloc(0, GFP_KERNEL, NULL);
5648diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5649index 7935115..c0eca6a 100644
5650--- a/arch/ia64/include/asm/pgtable.h
5651+++ b/arch/ia64/include/asm/pgtable.h
5652@@ -12,7 +12,7 @@
5653 * David Mosberger-Tang <davidm@hpl.hp.com>
5654 */
5655
5656-
5657+#include <linux/const.h>
5658 #include <asm/mman.h>
5659 #include <asm/page.h>
5660 #include <asm/processor.h>
5661@@ -142,6 +142,17 @@
5662 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5663 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5664 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5665+
5666+#ifdef CONFIG_PAX_PAGEEXEC
5667+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5668+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5669+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5670+#else
5671+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5672+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5673+# define PAGE_COPY_NOEXEC PAGE_COPY
5674+#endif
5675+
5676 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5677 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5678 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5679diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5680index 45698cd..e8e2dbc 100644
5681--- a/arch/ia64/include/asm/spinlock.h
5682+++ b/arch/ia64/include/asm/spinlock.h
5683@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5684 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5685
5686 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5687- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5688+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5689 }
5690
5691 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5692diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5693index 449c8c0..3d4b1e9 100644
5694--- a/arch/ia64/include/asm/uaccess.h
5695+++ b/arch/ia64/include/asm/uaccess.h
5696@@ -70,6 +70,7 @@
5697 && ((segment).seg == KERNEL_DS.seg \
5698 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5699 })
5700+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5701 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5702
5703 /*
5704@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5705 static inline unsigned long
5706 __copy_to_user (void __user *to, const void *from, unsigned long count)
5707 {
5708+ if (count > INT_MAX)
5709+ return count;
5710+
5711+ if (!__builtin_constant_p(count))
5712+ check_object_size(from, count, true);
5713+
5714 return __copy_user(to, (__force void __user *) from, count);
5715 }
5716
5717 static inline unsigned long
5718 __copy_from_user (void *to, const void __user *from, unsigned long count)
5719 {
5720+ if (count > INT_MAX)
5721+ return count;
5722+
5723+ if (!__builtin_constant_p(count))
5724+ check_object_size(to, count, false);
5725+
5726 return __copy_user((__force void __user *) to, from, count);
5727 }
5728
5729@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5730 ({ \
5731 void __user *__cu_to = (to); \
5732 const void *__cu_from = (from); \
5733- long __cu_len = (n); \
5734+ unsigned long __cu_len = (n); \
5735 \
5736- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5737+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5738+ if (!__builtin_constant_p(n)) \
5739+ check_object_size(__cu_from, __cu_len, true); \
5740 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5741+ } \
5742 __cu_len; \
5743 })
5744
5745@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5746 ({ \
5747 void *__cu_to = (to); \
5748 const void __user *__cu_from = (from); \
5749- long __cu_len = (n); \
5750+ unsigned long __cu_len = (n); \
5751 \
5752 __chk_user_ptr(__cu_from); \
5753- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5754+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5755+ if (!__builtin_constant_p(n)) \
5756+ check_object_size(__cu_to, __cu_len, false); \
5757 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5758+ } \
5759 __cu_len; \
5760 })
5761
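
The check_object_size() calls implement PAX_USERCOPY: when the length is not a
compile-time constant, the kernel verifies that the source or destination
object really spans that many bytes before copying to or from userspace. A toy
bounds check conveying the idea (not the kernel implementation, which consults
slab metadata and section boundaries; the session struct is invented):

#include <stdio.h>
#include <string.h>

struct session { char name[16]; unsigned long key; };

static int copy_field_checked(void *dst, const void *obj, size_t objsz,
			      const void *field, size_t n)
{
	const char *base = obj, *p = field;
	if (p < base || p + n > base + objsz)
		return -1;        /* would spill past the object: refuse */
	memcpy(dst, p, n);
	return 0;
}

int main(void)
{
	struct session s = { "demo", 42 };
	char out[64];

	printf("ok=%d\n",  copy_field_checked(out, &s, sizeof(s), s.name, 16));
	printf("bad=%d\n", copy_field_checked(out, &s, sizeof(s), s.name, 64));
	return 0;
}
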
5762diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5763index 24603be..948052d 100644
5764--- a/arch/ia64/kernel/module.c
5765+++ b/arch/ia64/kernel/module.c
5766@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5767 void
5768 module_free (struct module *mod, void *module_region)
5769 {
5770- if (mod && mod->arch.init_unw_table &&
5771- module_region == mod->module_init) {
5772+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5773 unw_remove_unwind_table(mod->arch.init_unw_table);
5774 mod->arch.init_unw_table = NULL;
5775 }
5776@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5777 }
5778
5779 static inline int
5780+in_init_rx (const struct module *mod, uint64_t addr)
5781+{
5782+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5783+}
5784+
5785+static inline int
5786+in_init_rw (const struct module *mod, uint64_t addr)
5787+{
5788+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5789+}
5790+
5791+static inline int
5792 in_init (const struct module *mod, uint64_t addr)
5793 {
5794- return addr - (uint64_t) mod->module_init < mod->init_size;
5795+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5796+}
5797+
5798+static inline int
5799+in_core_rx (const struct module *mod, uint64_t addr)
5800+{
5801+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5802+}
5803+
5804+static inline int
5805+in_core_rw (const struct module *mod, uint64_t addr)
5806+{
5807+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5808 }
5809
5810 static inline int
5811 in_core (const struct module *mod, uint64_t addr)
5812 {
5813- return addr - (uint64_t) mod->module_core < mod->core_size;
5814+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5815 }
5816
5817 static inline int
5818@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5819 break;
5820
5821 case RV_BDREL:
5822- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5823+ if (in_init_rx(mod, val))
5824+ val -= (uint64_t) mod->module_init_rx;
5825+ else if (in_init_rw(mod, val))
5826+ val -= (uint64_t) mod->module_init_rw;
5827+ else if (in_core_rx(mod, val))
5828+ val -= (uint64_t) mod->module_core_rx;
5829+ else if (in_core_rw(mod, val))
5830+ val -= (uint64_t) mod->module_core_rw;
5831 break;
5832
5833 case RV_LTV:
5834@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5835 * addresses have been selected...
5836 */
5837 uint64_t gp;
5838- if (mod->core_size > MAX_LTOFF)
5839+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5840 /*
5841 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5842 * at the end of the module.
5843 */
5844- gp = mod->core_size - MAX_LTOFF / 2;
5845+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5846 else
5847- gp = mod->core_size / 2;
5848- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5849+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5850+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5851 mod->arch.gp = gp;
5852 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5853 }
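
The in_init_rx()/in_core_rw() helpers above all rely on one unsigned
comparison for a two-sided range test: when addr is below base, the
subtraction wraps to a huge value and the "< size" test fails as well. A
standalone demonstration of the idiom:

#include <stdint.h>
#include <stdio.h>

static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;  /* rejects addr < base via wraparound */
}

int main(void)
{
	uint64_t base = 0xa0000000ULL, size = 0x1000;

	printf("%d %d %d\n",
	       in_range(base + 0x10, base, size),   /* 1: inside          */
	       in_range(base - 0x10, base, size),   /* 0: below, wraps    */
	       in_range(base + size, base, size));  /* 0: one past end    */
	return 0;
}
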
5854diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5855index c39c3cd..3c77738 100644
5856--- a/arch/ia64/kernel/palinfo.c
5857+++ b/arch/ia64/kernel/palinfo.c
5858@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5859 return NOTIFY_OK;
5860 }
5861
5862-static struct notifier_block __refdata palinfo_cpu_notifier =
5863+static struct notifier_block palinfo_cpu_notifier =
5864 {
5865 .notifier_call = palinfo_cpu_callback,
5866 .priority = 0,
5867diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5868index 41e33f8..65180b2a 100644
5869--- a/arch/ia64/kernel/sys_ia64.c
5870+++ b/arch/ia64/kernel/sys_ia64.c
5871@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5872 unsigned long align_mask = 0;
5873 struct mm_struct *mm = current->mm;
5874 struct vm_unmapped_area_info info;
5875+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5876
5877 if (len > RGN_MAP_LIMIT)
5878 return -ENOMEM;
5879@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5880 if (REGION_NUMBER(addr) == RGN_HPAGE)
5881 addr = 0;
5882 #endif
5883+
5884+#ifdef CONFIG_PAX_RANDMMAP
5885+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5886+ addr = mm->free_area_cache;
5887+ else
5888+#endif
5889+
5890 if (!addr)
5891 addr = TASK_UNMAPPED_BASE;
5892
5893@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5894 info.high_limit = TASK_SIZE;
5895 info.align_mask = align_mask;
5896 info.align_offset = 0;
5897+ info.threadstack_offset = offset;
5898 return vm_unmapped_area(&info);
5899 }
5900
5901diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5902index 84f8a52..7c76178 100644
5903--- a/arch/ia64/kernel/vmlinux.lds.S
5904+++ b/arch/ia64/kernel/vmlinux.lds.S
5905@@ -192,7 +192,7 @@ SECTIONS {
5906 /* Per-cpu data: */
5907 . = ALIGN(PERCPU_PAGE_SIZE);
5908 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5909- __phys_per_cpu_start = __per_cpu_load;
5910+ __phys_per_cpu_start = per_cpu_load;
5911 /*
5912 * ensure percpu data fits
5913 * into percpu page size
5914diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5915index 7225dad..2a7c8256 100644
5916--- a/arch/ia64/mm/fault.c
5917+++ b/arch/ia64/mm/fault.c
5918@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5919 return pte_present(pte);
5920 }
5921
5922+#ifdef CONFIG_PAX_PAGEEXEC
5923+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5924+{
5925+ unsigned long i;
5926+
5927+ printk(KERN_ERR "PAX: bytes at PC: ");
5928+ for (i = 0; i < 8; i++) {
5929+ unsigned int c;
5930+ if (get_user(c, (unsigned int *)pc+i))
5931+ printk(KERN_CONT "???????? ");
5932+ else
5933+ printk(KERN_CONT "%08x ", c);
5934+ }
5935+ printk("\n");
5936+}
5937+#endif
5938+
5939 # define VM_READ_BIT 0
5940 # define VM_WRITE_BIT 1
5941 # define VM_EXEC_BIT 2
5942@@ -151,8 +168,21 @@ retry:
5943 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5944 goto bad_area;
5945
5946- if ((vma->vm_flags & mask) != mask)
5947+ if ((vma->vm_flags & mask) != mask) {
5948+
5949+#ifdef CONFIG_PAX_PAGEEXEC
5950+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5951+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5952+ goto bad_area;
5953+
5954+ up_read(&mm->mmap_sem);
5955+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5956+ do_group_exit(SIGKILL);
5957+ }
5958+#endif
5959+
5960 goto bad_area;
5961+ }
5962
5963 /*
5964 * If for any reason at all we couldn't handle the fault, make
5965diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5966index 76069c1..c2aa816 100644
5967--- a/arch/ia64/mm/hugetlbpage.c
5968+++ b/arch/ia64/mm/hugetlbpage.c
5969@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5970 unsigned long pgoff, unsigned long flags)
5971 {
5972 struct vm_unmapped_area_info info;
5973+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5974
5975 if (len > RGN_MAP_LIMIT)
5976 return -ENOMEM;
5977@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5978 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5979 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5980 info.align_offset = 0;
5981+ info.threadstack_offset = offset;
5982 return vm_unmapped_area(&info);
5983 }
5984
5985diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5986index 25c3502..560dae7 100644
5987--- a/arch/ia64/mm/init.c
5988+++ b/arch/ia64/mm/init.c
5989@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5990 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5991 vma->vm_end = vma->vm_start + PAGE_SIZE;
5992 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5993+
5994+#ifdef CONFIG_PAX_PAGEEXEC
5995+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5996+ vma->vm_flags &= ~VM_EXEC;
5997+
5998+#ifdef CONFIG_PAX_MPROTECT
5999+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
6000+ vma->vm_flags &= ~VM_MAYEXEC;
6001+#endif
6002+
6003+ }
6004+#endif
6005+
6006 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6007 down_write(&current->mm->mmap_sem);
6008 if (insert_vm_struct(current->mm, vma)) {
6009diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
6010index 40b3ee9..8c2c112 100644
6011--- a/arch/m32r/include/asm/cache.h
6012+++ b/arch/m32r/include/asm/cache.h
6013@@ -1,8 +1,10 @@
6014 #ifndef _ASM_M32R_CACHE_H
6015 #define _ASM_M32R_CACHE_H
6016
6017+#include <linux/const.h>
6018+
6019 /* L1 cache line size */
6020 #define L1_CACHE_SHIFT 4
6021-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6022+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6023
6024 #endif /* _ASM_M32R_CACHE_H */
6025diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
6026index 82abd15..d95ae5d 100644
6027--- a/arch/m32r/lib/usercopy.c
6028+++ b/arch/m32r/lib/usercopy.c
6029@@ -14,6 +14,9 @@
6030 unsigned long
6031 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
6032 {
6033+ if ((long)n < 0)
6034+ return n;
6035+
6036 prefetch(from);
6037 if (access_ok(VERIFY_WRITE, to, n))
6038 __copy_user(to,from,n);
6039@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
6040 unsigned long
6041 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
6042 {
6043+ if ((long)n < 0)
6044+ return n;
6045+
6046 prefetchw(to);
6047 if (access_ok(VERIFY_READ, from, n))
6048 __copy_user_zeroing(to,from,n);
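
The (long)n < 0 guards added to both copy helpers reject lengths with the sign
bit set: such values are almost always the result of an underflowed size
computation, and passing them on to access_ok() and __copy_user() would risk
an enormous copy. A sketch of the rule:

#include <stdio.h>

/* Toy copy wrapper: a length that is negative when viewed as long is
 * treated as an underflowed size and nothing is copied. */
static unsigned long copy_guarded(void *to, const void *from, unsigned long n)
{
	(void)to; (void)from;
	if ((long)n < 0)
		return n;      /* report all n bytes as uncopied */
	/* ... the real routine would copy and return 0 on success ... */
	return 0;
}

int main(void)
{
	unsigned long payload = 4, header = 8;
	char buf[16];

	/* payload - header underflows to a huge unsigned value */
	printf("%lu\n", copy_guarded(buf, buf, payload - header));
	return 0;
}
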
6049diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
6050index 0395c51..5f26031 100644
6051--- a/arch/m68k/include/asm/cache.h
6052+++ b/arch/m68k/include/asm/cache.h
6053@@ -4,9 +4,11 @@
6054 #ifndef __ARCH_M68K_CACHE_H
6055 #define __ARCH_M68K_CACHE_H
6056
6057+#include <linux/const.h>
6058+
6059 /* bytes per L1 cache line */
6060 #define L1_CACHE_SHIFT 4
6061-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
6062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6063
6064 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
6065
6066diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
6067index c7591e8..ecef036 100644
6068--- a/arch/metag/include/asm/barrier.h
6069+++ b/arch/metag/include/asm/barrier.h
6070@@ -89,7 +89,7 @@ static inline void fence(void)
6071 do { \
6072 compiletime_assert_atomic_type(*p); \
6073 smp_mb(); \
6074- ACCESS_ONCE(*p) = (v); \
6075+ ACCESS_ONCE_RW(*p) = (v); \
6076 } while (0)
6077
6078 #define smp_load_acquire(p) \
6079diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
6080index 3c52fa6..11b2ad8 100644
6081--- a/arch/metag/mm/hugetlbpage.c
6082+++ b/arch/metag/mm/hugetlbpage.c
6083@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
6084 info.high_limit = TASK_SIZE;
6085 info.align_mask = PAGE_MASK & HUGEPT_MASK;
6086 info.align_offset = 0;
6087+ info.threadstack_offset = 0;
6088 return vm_unmapped_area(&info);
6089 }
6090
6091diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
6092index 4efe96a..60e8699 100644
6093--- a/arch/microblaze/include/asm/cache.h
6094+++ b/arch/microblaze/include/asm/cache.h
6095@@ -13,11 +13,12 @@
6096 #ifndef _ASM_MICROBLAZE_CACHE_H
6097 #define _ASM_MICROBLAZE_CACHE_H
6098
6099+#include <linux/const.h>
6100 #include <asm/registers.h>
6101
6102 #define L1_CACHE_SHIFT 5
6103 /* word-granular cache in microblaze */
6104-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6105+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6106
6107 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6108
6109diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
6110index 4e238e6..7c9ed92 100644
6111--- a/arch/mips/Kconfig
6112+++ b/arch/mips/Kconfig
6113@@ -2392,6 +2392,7 @@ source "kernel/Kconfig.preempt"
6114
6115 config KEXEC
6116 bool "Kexec system call"
6117+ depends on !GRKERNSEC_KMEM
6118 help
6119 kexec is a system call that implements the ability to shutdown your
6120 current kernel, and to start another kernel. It is like a reboot
6121diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
6122index 02f2444..506969c 100644
6123--- a/arch/mips/cavium-octeon/dma-octeon.c
6124+++ b/arch/mips/cavium-octeon/dma-octeon.c
6125@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
6126 if (dma_release_from_coherent(dev, order, vaddr))
6127 return;
6128
6129- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
6130+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
6131 }
6132
6133 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
6134diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
6135index 37b2bef..02122b8 100644
6136--- a/arch/mips/include/asm/atomic.h
6137+++ b/arch/mips/include/asm/atomic.h
6138@@ -21,15 +21,39 @@
6139 #include <asm/cmpxchg.h>
6140 #include <asm/war.h>
6141
6142+#ifdef CONFIG_GENERIC_ATOMIC64
6143+#include <asm-generic/atomic64.h>
6144+#endif
6145+
6146 #define ATOMIC_INIT(i) { (i) }
6147
6148+#ifdef CONFIG_64BIT
6149+#define _ASM_EXTABLE(from, to) \
6150+" .section __ex_table,\"a\"\n" \
6151+" .dword " #from ", " #to"\n" \
6152+" .previous\n"
6153+#else
6154+#define _ASM_EXTABLE(from, to) \
6155+" .section __ex_table,\"a\"\n" \
6156+" .word " #from ", " #to"\n" \
6157+" .previous\n"
6158+#endif
6159+
6160 /*
6161 * atomic_read - read atomic variable
6162 * @v: pointer of type atomic_t
6163 *
6164 * Atomically reads the value of @v.
6165 */
6166-#define atomic_read(v) (*(volatile int *)&(v)->counter)
6167+static inline int atomic_read(const atomic_t *v)
6168+{
6169+ return (*(volatile const int *) &v->counter);
6170+}
6171+
6172+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6173+{
6174+ return (*(volatile const int *) &v->counter);
6175+}
6176
6177 /*
6178 * atomic_set - set atomic variable
6179@@ -38,7 +62,15 @@
6180 *
6181 * Atomically sets the value of @v to @i.
6182 */
6183-#define atomic_set(v, i) ((v)->counter = (i))
6184+static inline void atomic_set(atomic_t *v, int i)
6185+{
6186+ v->counter = i;
6187+}
6188+
6189+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6190+{
6191+ v->counter = i;
6192+}
6193
6194 /*
6195 * atomic_add - add integer to atomic variable
6196@@ -47,7 +79,67 @@
6197 *
6198 * Atomically adds @i to @v.
6199 */
6200-static __inline__ void atomic_add(int i, atomic_t * v)
6201+static __inline__ void atomic_add(int i, atomic_t *v)
6202+{
6203+ int temp;
6204+
6205+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6206+ __asm__ __volatile__(
6207+ " .set mips3 \n"
6208+ "1: ll %0, %1 # atomic_add \n"
6209+#ifdef CONFIG_PAX_REFCOUNT
6210+ /* Exception on overflow. */
6211+ "2: add %0, %2 \n"
6212+#else
6213+ " addu %0, %2 \n"
6214+#endif
6215+ " sc %0, %1 \n"
6216+ " beqzl %0, 1b \n"
6217+#ifdef CONFIG_PAX_REFCOUNT
6218+ "3: \n"
6219+ _ASM_EXTABLE(2b, 3b)
6220+#endif
6221+ " .set mips0 \n"
6222+ : "=&r" (temp), "+m" (v->counter)
6223+ : "Ir" (i));
6224+ } else if (kernel_uses_llsc) {
6225+ __asm__ __volatile__(
6226+ " .set mips3 \n"
6227+ "1: ll %0, %1 # atomic_add \n"
6228+#ifdef CONFIG_PAX_REFCOUNT
6229+ /* Exception on overflow. */
6230+ "2: add %0, %2 \n"
6231+#else
6232+ " addu %0, %2 \n"
6233+#endif
6234+ " sc %0, %1 \n"
6235+ " beqz %0, 1b \n"
6236+#ifdef CONFIG_PAX_REFCOUNT
6237+ "3: \n"
6238+ _ASM_EXTABLE(2b, 3b)
6239+#endif
6240+ " .set mips0 \n"
6241+ : "=&r" (temp), "+m" (v->counter)
6242+ : "Ir" (i));
6243+ } else {
6244+ unsigned long flags;
6245+
6246+ raw_local_irq_save(flags);
6247+ __asm__ __volatile__(
6248+#ifdef CONFIG_PAX_REFCOUNT
6249+ /* Exception on overflow. */
6250+ "1: add %0, %1 \n"
6251+ "2: \n"
6252+ _ASM_EXTABLE(1b, 2b)
6253+#else
6254+ " addu %0, %1 \n"
6255+#endif
6256+ : "+r" (v->counter) : "Ir" (i));
6257+ raw_local_irq_restore(flags);
6258+ }
6259+}
6260+
6261+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6262 {
6263 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6264 int temp;
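
Under PAX_REFCOUNT the wrapping addu is swapped for the signed add, which
raises an integer-overflow exception on MIPS; the _ASM_EXTABLE entry then
steers execution past the sc so the poisoned value is never stored, and the
trap handler reports the overflow. The effect is that an atomic_t used as a
reference count can no longer wrap around and free a live object. A portable
sketch of the same policy, with the __builtin_add_overflow() compiler builtin
standing in for the trapping instruction and abort() for the kernel's handler:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static void refcount_add(int i, int *v)
{
	int sum;
	if (__builtin_add_overflow(*v, i, &sum)) {
		fprintf(stderr, "refcount overflow caught\n");
		abort();        /* the kernel instead raises an exception */
	}
	*v = sum;
}

int main(void)
{
	int ref = INT_MAX - 1;
	refcount_add(1, &ref);  /* fine: reaches INT_MAX          */
	printf("ref=%d\n", ref);
	refcount_add(1, &ref);  /* would wrap: caught and aborted */
	return 0;
}
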
6265@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
6266 *
6267 * Atomically subtracts @i from @v.
6268 */
6269-static __inline__ void atomic_sub(int i, atomic_t * v)
6270+static __inline__ void atomic_sub(int i, atomic_t *v)
6271+{
6272+ int temp;
6273+
6274+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6275+ __asm__ __volatile__(
6276+ " .set mips3 \n"
6277+ "1: ll %0, %1 # atomic_sub \n"
6278+#ifdef CONFIG_PAX_REFCOUNT
6279+ /* Exception on overflow. */
6280+ "2: sub %0, %2 \n"
6281+#else
6282+ " subu %0, %2 \n"
6283+#endif
6284+ " sc %0, %1 \n"
6285+ " beqzl %0, 1b \n"
6286+#ifdef CONFIG_PAX_REFCOUNT
6287+ "3: \n"
6288+ _ASM_EXTABLE(2b, 3b)
6289+#endif
6290+ " .set mips0 \n"
6291+ : "=&r" (temp), "+m" (v->counter)
6292+ : "Ir" (i));
6293+ } else if (kernel_uses_llsc) {
6294+ __asm__ __volatile__(
6295+ " .set mips3 \n"
6296+ "1: ll %0, %1 # atomic_sub \n"
6297+#ifdef CONFIG_PAX_REFCOUNT
6298+ /* Exception on overflow. */
6299+ "2: sub %0, %2 \n"
6300+#else
6301+ " subu %0, %2 \n"
6302+#endif
6303+ " sc %0, %1 \n"
6304+ " beqz %0, 1b \n"
6305+#ifdef CONFIG_PAX_REFCOUNT
6306+ "3: \n"
6307+ _ASM_EXTABLE(2b, 3b)
6308+#endif
6309+ " .set mips0 \n"
6310+ : "=&r" (temp), "+m" (v->counter)
6311+ : "Ir" (i));
6312+ } else {
6313+ unsigned long flags;
6314+
6315+ raw_local_irq_save(flags);
6316+ __asm__ __volatile__(
6317+#ifdef CONFIG_PAX_REFCOUNT
6318+ /* Exception on overflow. */
6319+ "1: sub %0, %1 \n"
6320+ "2: \n"
6321+ _ASM_EXTABLE(1b, 2b)
6322+#else
6323+ " subu %0, %1 \n"
6324+#endif
6325+ : "+r" (v->counter) : "Ir" (i));
6326+ raw_local_irq_restore(flags);
6327+ }
6328+}
6329+
6330+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
6331 {
6332 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6333 int temp;
6334@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
6335 /*
6336 * Same as above, but return the result value
6337 */
6338-static __inline__ int atomic_add_return(int i, atomic_t * v)
6339+static __inline__ int atomic_add_return(int i, atomic_t *v)
6340+{
6341+ int result;
6342+ int temp;
6343+
6344+ smp_mb__before_llsc();
6345+
6346+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6347+ __asm__ __volatile__(
6348+ " .set mips3 \n"
6349+ "1: ll %1, %2 # atomic_add_return \n"
6350+#ifdef CONFIG_PAX_REFCOUNT
6351+ "2: add %0, %1, %3 \n"
6352+#else
6353+ " addu %0, %1, %3 \n"
6354+#endif
6355+ " sc %0, %2 \n"
6356+ " beqzl %0, 1b \n"
6357+#ifdef CONFIG_PAX_REFCOUNT
6358+ " b 4f \n"
6359+ " .set noreorder \n"
6360+ "3: b 5f \n"
6361+ " move %0, %1 \n"
6362+ " .set reorder \n"
6363+ _ASM_EXTABLE(2b, 3b)
6364+#endif
6365+ "4: addu %0, %1, %3 \n"
6366+#ifdef CONFIG_PAX_REFCOUNT
6367+ "5: \n"
6368+#endif
6369+ " .set mips0 \n"
6370+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6371+ : "Ir" (i));
6372+ } else if (kernel_uses_llsc) {
6373+ __asm__ __volatile__(
6374+ " .set mips3 \n"
6375+ "1: ll %1, %2 # atomic_add_return \n"
6376+#ifdef CONFIG_PAX_REFCOUNT
6377+ "2: add %0, %1, %3 \n"
6378+#else
6379+ " addu %0, %1, %3 \n"
6380+#endif
6381+ " sc %0, %2 \n"
6382+ " bnez %0, 4f \n"
6383+ " b 1b \n"
6384+#ifdef CONFIG_PAX_REFCOUNT
6385+ " .set noreorder \n"
6386+ "3: b 5f \n"
6387+ " move %0, %1 \n"
6388+ " .set reorder \n"
6389+ _ASM_EXTABLE(2b, 3b)
6390+#endif
6391+ "4: addu %0, %1, %3 \n"
6392+#ifdef CONFIG_PAX_REFCOUNT
6393+ "5: \n"
6394+#endif
6395+ " .set mips0 \n"
6396+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6397+ : "Ir" (i));
6398+ } else {
6399+ unsigned long flags;
6400+
6401+ raw_local_irq_save(flags);
6402+ __asm__ __volatile__(
6403+ " lw %0, %1 \n"
6404+#ifdef CONFIG_PAX_REFCOUNT
6405+ /* Exception on overflow. */
6406+ "1: add %0, %2 \n"
6407+#else
6408+ " addu %0, %2 \n"
6409+#endif
6410+ " sw %0, %1 \n"
6411+#ifdef CONFIG_PAX_REFCOUNT
6412+ /* Note: Dest reg is not modified on overflow */
6413+ "2: \n"
6414+ _ASM_EXTABLE(1b, 2b)
6415+#endif
6416+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6417+ raw_local_irq_restore(flags);
6418+ }
6419+
6420+ smp_llsc_mb();
6421+
6422+ return result;
6423+}
6424+
6425+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6426 {
6427 int result;
6428
6429@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
6430 return result;
6431 }
6432
6433-static __inline__ int atomic_sub_return(int i, atomic_t * v)
6434+static __inline__ int atomic_sub_return(int i, atomic_t *v)
6435+{
6436+ int result;
6437+ int temp;
6438+
6439+ smp_mb__before_llsc();
6440+
6441+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6442+ __asm__ __volatile__(
6443+ " .set mips3 \n"
6444+ "1: ll %1, %2 # atomic_sub_return \n"
6445+#ifdef CONFIG_PAX_REFCOUNT
6446+ "2: sub %0, %1, %3 \n"
6447+#else
6448+ " subu %0, %1, %3 \n"
6449+#endif
6450+ " sc %0, %2 \n"
6451+ " beqzl %0, 1b \n"
6452+#ifdef CONFIG_PAX_REFCOUNT
6453+ " b 4f \n"
6454+ " .set noreorder \n"
6455+ "3: b 5f \n"
6456+ " move %0, %1 \n"
6457+ " .set reorder \n"
6458+ _ASM_EXTABLE(2b, 3b)
6459+#endif
6460+ "4: subu %0, %1, %3 \n"
6461+#ifdef CONFIG_PAX_REFCOUNT
6462+ "5: \n"
6463+#endif
6464+ " .set mips0 \n"
6465+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6466+ : "Ir" (i), "m" (v->counter)
6467+ : "memory");
6468+ } else if (kernel_uses_llsc) {
6469+ __asm__ __volatile__(
6470+ " .set mips3 \n"
6471+ "1: ll %1, %2 # atomic_sub_return \n"
6472+#ifdef CONFIG_PAX_REFCOUNT
6473+ "2: sub %0, %1, %3 \n"
6474+#else
6475+ " subu %0, %1, %3 \n"
6476+#endif
6477+ " sc %0, %2 \n"
6478+ " bnez %0, 4f \n"
6479+ " b 1b \n"
6480+#ifdef CONFIG_PAX_REFCOUNT
6481+ " .set noreorder \n"
6482+ "3: b 5f \n"
6483+ " move %0, %1 \n"
6484+ " .set reorder \n"
6485+ _ASM_EXTABLE(2b, 3b)
6486+#endif
6487+ "4: subu %0, %1, %3 \n"
6488+#ifdef CONFIG_PAX_REFCOUNT
6489+ "5: \n"
6490+#endif
6491+ " .set mips0 \n"
6492+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6493+ : "Ir" (i));
6494+ } else {
6495+ unsigned long flags;
6496+
6497+ raw_local_irq_save(flags);
6498+ __asm__ __volatile__(
6499+ " lw %0, %1 \n"
6500+#ifdef CONFIG_PAX_REFCOUNT
6501+ /* Exception on overflow. */
6502+ "1: sub %0, %2 \n"
6503+#else
6504+ " subu %0, %2 \n"
6505+#endif
6506+ " sw %0, %1 \n"
6507+#ifdef CONFIG_PAX_REFCOUNT
6508+ /* Note: Dest reg is not modified on overflow */
6509+ "2: \n"
6510+ _ASM_EXTABLE(1b, 2b)
6511+#endif
6512+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6513+ raw_local_irq_restore(flags);
6514+ }
6515+
6516+ smp_llsc_mb();
6517+
6518+ return result;
6519+}
6520+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
6521 {
6522 int result;
6523
6524@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
6525 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6526 * The function returns the old value of @v minus @i.
6527 */
6528-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6529+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6530 {
6531 int result;
6532
6533@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6534 return result;
6535 }
6536
6537-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6538-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6539+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6540+{
6541+ return cmpxchg(&v->counter, old, new);
6542+}
6543+
6544+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6545+ int new)
6546+{
6547+ return cmpxchg(&(v->counter), old, new);
6548+}
6549+
6550+static inline int atomic_xchg(atomic_t *v, int new)
6551+{
6552+ return xchg(&v->counter, new);
6553+}
6554+
6555+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6556+{
6557+ return xchg(&(v->counter), new);
6558+}
6559
6560 /**
6561 * __atomic_add_unless - add unless the number is a given value
6562@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6563
6564 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6565 #define atomic_inc_return(v) atomic_add_return(1, (v))
6566+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6567+{
6568+ return atomic_add_return_unchecked(1, v);
6569+}
6570
6571 /*
6572 * atomic_sub_and_test - subtract value from variable and test result
6573@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6574 * other cases.
6575 */
6576 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6577+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6578+{
6579+ return atomic_add_return_unchecked(1, v) == 0;
6580+}
6581
6582 /*
6583 * atomic_dec_and_test - decrement by 1 and test
6584@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6585 * Atomically increments @v by 1.
6586 */
6587 #define atomic_inc(v) atomic_add(1, (v))
6588+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6589+{
6590+ atomic_add_unchecked(1, v);
6591+}
6592
6593 /*
6594 * atomic_dec - decrement and test
6595@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6596 * Atomically decrements @v by 1.
6597 */
6598 #define atomic_dec(v) atomic_sub(1, (v))
6599+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6600+{
6601+ atomic_sub_unchecked(1, v);
6602+}
6603
6604 /*
6605 * atomic_add_negative - add and test if negative
6606@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6607 * @v: pointer of type atomic64_t
6608 *
6609 */
6610-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6611+static inline long atomic64_read(const atomic64_t *v)
6612+{
6613+ return (*(volatile const long *) &v->counter);
6614+}
6615+
6616+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6617+{
6618+ return (*(volatile const long *) &v->counter);
6619+}
6620
6621 /*
6622 * atomic64_set - set atomic variable
6623 * @v: pointer of type atomic64_t
6624 * @i: required value
6625 */
6626-#define atomic64_set(v, i) ((v)->counter = (i))
6627+static inline void atomic64_set(atomic64_t *v, long i)
6628+{
6629+ v->counter = i;
6630+}
6631+
6632+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6633+{
6634+ v->counter = i;
6635+}
6636
6637 /*
6638 * atomic64_add - add integer to atomic variable
6639@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6640 *
6641 * Atomically adds @i to @v.
6642 */
6643-static __inline__ void atomic64_add(long i, atomic64_t * v)
6644+static __inline__ void atomic64_add(long i, atomic64_t *v)
6645+{
6646+ long temp;
6647+
6648+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6649+ __asm__ __volatile__(
6650+ " .set mips3 \n"
6651+ "1: lld %0, %1 # atomic64_add \n"
6652+#ifdef CONFIG_PAX_REFCOUNT
6653+ /* Exception on overflow. */
6654+ "2: dadd %0, %2 \n"
6655+#else
6656+ " daddu %0, %2 \n"
6657+#endif
6658+ " scd %0, %1 \n"
6659+ " beqzl %0, 1b \n"
6660+#ifdef CONFIG_PAX_REFCOUNT
6661+ "3: \n"
6662+ _ASM_EXTABLE(2b, 3b)
6663+#endif
6664+ " .set mips0 \n"
6665+ : "=&r" (temp), "+m" (v->counter)
6666+ : "Ir" (i));
6667+ } else if (kernel_uses_llsc) {
6668+ __asm__ __volatile__(
6669+ " .set mips3 \n"
6670+ "1: lld %0, %1 # atomic64_add \n"
6671+#ifdef CONFIG_PAX_REFCOUNT
6672+ /* Exception on overflow. */
6673+ "2: dadd %0, %2 \n"
6674+#else
6675+ " daddu %0, %2 \n"
6676+#endif
6677+ " scd %0, %1 \n"
6678+ " beqz %0, 1b \n"
6679+#ifdef CONFIG_PAX_REFCOUNT
6680+ "3: \n"
6681+ _ASM_EXTABLE(2b, 3b)
6682+#endif
6683+ " .set mips0 \n"
6684+ : "=&r" (temp), "+m" (v->counter)
6685+ : "Ir" (i));
6686+ } else {
6687+ unsigned long flags;
6688+
6689+ raw_local_irq_save(flags);
6690+ __asm__ __volatile__(
6691+#ifdef CONFIG_PAX_REFCOUNT
6692+ /* Exception on overflow. */
6693+ "1: dadd %0, %1 \n"
6694+ "2: \n"
6695+ _ASM_EXTABLE(1b, 2b)
6696+#else
6697+ " daddu %0, %1 \n"
6698+#endif
6699+ : "+r" (v->counter) : "Ir" (i));
6700+ raw_local_irq_restore(flags);
6701+ }
6702+}
6703+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6704 {
6705 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6706 long temp;
6707@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6708 *
6709 * Atomically subtracts @i from @v.
6710 */
6711-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6712+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6713+{
6714+ long temp;
6715+
6716+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6717+ __asm__ __volatile__(
6718+ " .set mips3 \n"
6719+ "1: lld %0, %1 # atomic64_sub \n"
6720+#ifdef CONFIG_PAX_REFCOUNT
6721+ /* Exception on overflow. */
6722+ "2: dsub %0, %2 \n"
6723+#else
6724+ " dsubu %0, %2 \n"
6725+#endif
6726+ " scd %0, %1 \n"
6727+ " beqzl %0, 1b \n"
6728+#ifdef CONFIG_PAX_REFCOUNT
6729+ "3: \n"
6730+ _ASM_EXTABLE(2b, 3b)
6731+#endif
6732+ " .set mips0 \n"
6733+ : "=&r" (temp), "+m" (v->counter)
6734+ : "Ir" (i));
6735+ } else if (kernel_uses_llsc) {
6736+ __asm__ __volatile__(
6737+ " .set mips3 \n"
6738+ "1: lld %0, %1 # atomic64_sub \n"
6739+#ifdef CONFIG_PAX_REFCOUNT
6740+ /* Exception on overflow. */
6741+ "2: dsub %0, %2 \n"
6742+#else
6743+ " dsubu %0, %2 \n"
6744+#endif
6745+ " scd %0, %1 \n"
6746+ " beqz %0, 1b \n"
6747+#ifdef CONFIG_PAX_REFCOUNT
6748+ "3: \n"
6749+ _ASM_EXTABLE(2b, 3b)
6750+#endif
6751+ " .set mips0 \n"
6752+ : "=&r" (temp), "+m" (v->counter)
6753+ : "Ir" (i));
6754+ } else {
6755+ unsigned long flags;
6756+
6757+ raw_local_irq_save(flags);
6758+ __asm__ __volatile__(
6759+#ifdef CONFIG_PAX_REFCOUNT
6760+ /* Exception on overflow. */
6761+ "1: dsub %0, %1 \n"
6762+ "2: \n"
6763+ _ASM_EXTABLE(1b, 2b)
6764+#else
6765+ " dsubu %0, %1 \n"
6766+#endif
6767+ : "+r" (v->counter) : "Ir" (i));
6768+ raw_local_irq_restore(flags);
6769+ }
6770+}
6771+
6772+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6773 {
6774 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6775 long temp;
6776@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6777 /*
6778 * Same as above, but return the result value
6779 */
6780-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6781+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6782+{
6783+ long result;
6784+ long temp;
6785+
6786+ smp_mb__before_llsc();
6787+
6788+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6789+ __asm__ __volatile__(
6790+ " .set mips3 \n"
6791+ "1: lld %1, %2 # atomic64_add_return \n"
6792+#ifdef CONFIG_PAX_REFCOUNT
6793+ "2: dadd %0, %1, %3 \n"
6794+#else
6795+ " daddu %0, %1, %3 \n"
6796+#endif
6797+ " scd %0, %2 \n"
6798+ " beqzl %0, 1b \n"
6799+#ifdef CONFIG_PAX_REFCOUNT
6800+ " b 4f \n"
6801+ " .set noreorder \n"
6802+ "3: b 5f \n"
6803+ " move %0, %1 \n"
6804+ " .set reorder \n"
6805+ _ASM_EXTABLE(2b, 3b)
6806+#endif
6807+ "4: daddu %0, %1, %3 \n"
6808+#ifdef CONFIG_PAX_REFCOUNT
6809+ "5: \n"
6810+#endif
6811+ " .set mips0 \n"
6812+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6813+ : "Ir" (i));
6814+ } else if (kernel_uses_llsc) {
6815+ __asm__ __volatile__(
6816+ " .set mips3 \n"
6817+ "1: lld %1, %2 # atomic64_add_return \n"
6818+#ifdef CONFIG_PAX_REFCOUNT
6819+ "2: dadd %0, %1, %3 \n"
6820+#else
6821+ " daddu %0, %1, %3 \n"
6822+#endif
6823+ " scd %0, %2 \n"
6824+ " bnez %0, 4f \n"
6825+ " b 1b \n"
6826+#ifdef CONFIG_PAX_REFCOUNT
6827+ " .set noreorder \n"
6828+ "3: b 5f \n"
6829+ " move %0, %1 \n"
6830+ " .set reorder \n"
6831+ _ASM_EXTABLE(2b, 3b)
6832+#endif
6833+ "4: daddu %0, %1, %3 \n"
6834+#ifdef CONFIG_PAX_REFCOUNT
6835+ "5: \n"
6836+#endif
6837+ " .set mips0 \n"
6838+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6839+ : "Ir" (i), "m" (v->counter)
6840+ : "memory");
6841+ } else {
6842+ unsigned long flags;
6843+
6844+ raw_local_irq_save(flags);
6845+ __asm__ __volatile__(
6846+ " ld %0, %1 \n"
6847+#ifdef CONFIG_PAX_REFCOUNT
6848+ /* Exception on overflow. */
6849+ "1: dadd %0, %2 \n"
6850+#else
6851+ " daddu %0, %2 \n"
6852+#endif
6853+ " sd %0, %1 \n"
6854+#ifdef CONFIG_PAX_REFCOUNT
6855+ /* Note: Dest reg is not modified on overflow */
6856+ "2: \n"
6857+ _ASM_EXTABLE(1b, 2b)
6858+#endif
6859+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6860+ raw_local_irq_restore(flags);
6861+ }
6862+
6863+ smp_llsc_mb();
6864+
6865+ return result;
6866+}
6867+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6868 {
6869 long result;
6870
6871@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6872 return result;
6873 }
6874
6875-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6876+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6877+{
6878+ long result;
6879+ long temp;
6880+
6881+ smp_mb__before_llsc();
6882+
6883+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6886+ __asm__ __volatile__(
6887+ " .set mips3 \n"
6888+ "1: lld %1, %2 # atomic64_sub_return \n"
6889+#ifdef CONFIG_PAX_REFCOUNT
6890+ "2: dsub %0, %1, %3 \n"
6891+#else
6892+ " dsubu %0, %1, %3 \n"
6893+#endif
6894+ " scd %0, %2 \n"
6895+ " beqzl %0, 1b \n"
6896+#ifdef CONFIG_PAX_REFCOUNT
6897+ " b 4f \n"
6898+ " .set noreorder \n"
6899+ "3: b 5f \n"
6900+ " move %0, %1 \n"
6901+ " .set reorder \n"
6902+ _ASM_EXTABLE(2b, 3b)
6903+#endif
6904+ "4: dsubu %0, %1, %3 \n"
6905+#ifdef CONFIG_PAX_REFCOUNT
6906+ "5: \n"
6907+#endif
6908+ " .set mips0 \n"
6909+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6910+ : "Ir" (i), "m" (v->counter)
6911+ : "memory");
6912+ } else if (kernel_uses_llsc) {
6913+ __asm__ __volatile__(
6914+ " .set mips3 \n"
6915+ "1: lld %1, %2 # atomic64_sub_return \n"
6916+#ifdef CONFIG_PAX_REFCOUNT
6917+ "2: dsub %0, %1, %3 \n"
6918+#else
6919+ " dsubu %0, %1, %3 \n"
6920+#endif
6921+ " scd %0, %2 \n"
6922+ " bnez %0, 4f \n"
6923+ " b 1b \n"
6924+#ifdef CONFIG_PAX_REFCOUNT
6925+ " .set noreorder \n"
6926+ "3: b 5f \n"
6927+ " move %0, %1 \n"
6928+ " .set reorder \n"
6929+ _ASM_EXTABLE(2b, 3b)
6930+#endif
6931+ "4: dsubu %0, %1, %3 \n"
6932+#ifdef CONFIG_PAX_REFCOUNT
6933+ "5: \n"
6934+#endif
6935+ " .set mips0 \n"
6936+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6937+ : "Ir" (i), "m" (v->counter)
6938+ : "memory");
6939+ } else {
6940+ unsigned long flags;
6941+
6942+ raw_local_irq_save(flags);
6943+ __asm__ __volatile__(
6944+ " ld %0, %1 \n"
6945+#ifdef CONFIG_PAX_REFCOUNT
6946+ /* Exception on overflow. */
6947+ "1: dsub %0, %2 \n"
6948+#else
6949+ " dsubu %0, %2 \n"
6950+#endif
6951+ " sd %0, %1 \n"
6952+#ifdef CONFIG_PAX_REFCOUNT
6953+ /* Note: Dest reg is not modified on overflow */
6954+ "2: \n"
6955+ _ASM_EXTABLE(1b, 2b)
6956+#endif
6957+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6958+ raw_local_irq_restore(flags);
6959+ }
6960+
6961+ smp_llsc_mb();
6962+
6963+ return result;
6964+}
6965+
6966+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6967 {
6968 long result;
6969
6970@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6971 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6972 * The function returns the old value of @v minus @i.
6973 */
6974-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6975+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6976 {
6977 long result;
6978
6979@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6980 return result;
6981 }
6982
6983-#define atomic64_cmpxchg(v, o, n) \
6984- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6985-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6986+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6987+{
6988+ return cmpxchg(&v->counter, old, new);
6989+}
6990+
6991+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6992+ long new)
6993+{
6994+	return cmpxchg(&v->counter, old, new);
6995+}
6996+
6997+static inline long atomic64_xchg(atomic64_t *v, long new)
6998+{
6999+ return xchg(&v->counter, new);
7000+}
7001+
7002+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7003+{
7004+	return xchg(&v->counter, new);
7005+}
7006
7007 /**
7008 * atomic64_add_unless - add unless the number is a given value
7009@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7010
7011 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
7012 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
7013+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
7014
7015 /*
7016 * atomic64_sub_and_test - subtract value from variable and test result
7017@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7018 * other cases.
7019 */
7020 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7021+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
7022
7023 /*
7024 * atomic64_dec_and_test - decrement by 1 and test
7025@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7026 * Atomically increments @v by 1.
7027 */
7028 #define atomic64_inc(v) atomic64_add(1, (v))
7029+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
7030
7031 /*
7032 * atomic64_dec - decrement and test
7033@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7034 * Atomically decrements @v by 1.
7035 */
7036 #define atomic64_dec(v) atomic64_sub(1, (v))
7037+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
7038
7039 /*
7040 * atomic64_add_negative - add and test if negative
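
The hunks above split the MIPS atomic64 API into two flavours. Under CONFIG_PAX_REFCOUNT the checked operations use the trapping dadd/dsub forms, so a signed overflow raises an exception that is recovered through the _ASM_EXTABLE entries; the *_unchecked operations keep the wrapping daddu/dsubu semantics for counters that may legitimately overflow. Below is a minimal userspace sketch of that contract, with a GCC/Clang overflow builtin standing in for the trapping instruction; it is single-threaded, provides no atomicity, and every name in it is illustrative rather than part of the patch.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { long counter; } demo64_t;           /* stands in for atomic64_t */
    typedef struct { long counter; } demo64_unchecked_t; /* ...atomic64_unchecked_t */

    static long demo_add_return(long i, demo64_t *v)
    {
            long result;

            /* __builtin_saddl_overflow() plays the role of the "dadd"
             * that raises the overflow exception above */
            if (__builtin_saddl_overflow(v->counter, i, &result)) {
                    fprintf(stderr, "refcount overflow, refusing to wrap\n");
                    abort();        /* the kernel reports and recovers instead */
            }
            v->counter = result;
            return result;
    }

    static long demo_add_return_unchecked(long i, demo64_unchecked_t *v)
    {
            /* deliberately wrapping, like daddu; unsigned arithmetic keeps
             * the sketch itself free of signed overflow */
            v->counter = (long)((unsigned long)v->counter + (unsigned long)i);
            return v->counter;
    }

    int main(void)
    {
            demo64_t r = { 0 };
            demo64_unchecked_t s = { 0 };

            printf("%ld %ld\n", demo_add_return(1, &r),
                   demo_add_return_unchecked(-1, &s));
            return 0;
    }
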
7041diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
7042index d0101dd..266982c 100644
7043--- a/arch/mips/include/asm/barrier.h
7044+++ b/arch/mips/include/asm/barrier.h
7045@@ -184,7 +184,7 @@
7046 do { \
7047 compiletime_assert_atomic_type(*p); \
7048 smp_mb(); \
7049- ACCESS_ONCE(*p) = (v); \
7050+ ACCESS_ONCE_RW(*p) = (v); \
7051 } while (0)
7052
7053 #define smp_load_acquire(p) \
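
The ACCESS_ONCE_RW switch here follows from grsecurity constifying ACCESS_ONCE elsewhere in this patch, so that accidental writes through it become compile errors; sites that genuinely store, like this smp_store_release() path, use the RW variant. The two definitions as added to include/linux/compiler.h, quoted from memory and therefore to be read as a sketch:

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
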
7054diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
7055index b4db69f..8f3b093 100644
7056--- a/arch/mips/include/asm/cache.h
7057+++ b/arch/mips/include/asm/cache.h
7058@@ -9,10 +9,11 @@
7059 #ifndef _ASM_CACHE_H
7060 #define _ASM_CACHE_H
7061
7062+#include <linux/const.h>
7063 #include <kmalloc.h>
7064
7065 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
7066-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7067+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7068
7069 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
7070 #define SMP_CACHE_BYTES L1_CACHE_BYTES
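
Replacing the bare 1 with _AC(1,UL) types L1_CACHE_BYTES as unsigned long in C, which keeps mask expressions such as ~(L1_CACHE_BYTES - 1) well-behaved on 64-bit, while still expanding to a plain 1 in assembly, where a UL suffix would not parse. The machinery lives in include/uapi/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)        X
    #else
    #define __AC(X,Y)       (X##Y)
    #define _AC(X,Y)        __AC(X,Y)
    #endif
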
7071diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
7072index d414405..6bb4ba2 100644
7073--- a/arch/mips/include/asm/elf.h
7074+++ b/arch/mips/include/asm/elf.h
7075@@ -398,13 +398,16 @@ extern const char *__elf_platform;
7076 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7077 #endif
7078
7079+#ifdef CONFIG_PAX_ASLR
7080+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7081+
7082+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7083+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7084+#endif
7085+
7086 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7087 struct linux_binprm;
7088 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7089 int uses_interp);
7090
7091-struct mm_struct;
7092-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7093-#define arch_randomize_brk arch_randomize_brk
7094-
7095 #endif /* _ASM_ELF_H */
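
PAX_ELF_ET_DYN_BASE pins PIE binaries at 0x00400000 regardless of address-space width, which is why both arms of the ternary are identical, and the DELTA values request 27-PAGE_SHIFT (32-bit) or 36-PAGE_SHIFT (64-bit) bits of mmap and stack entropy. The sketch below shows how such a bit count becomes an address offset; the real consumer is the ELF loader elsewhere in this patch, and the helper name is invented.

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    /* keep len bits of entropy, scaled to whole pages */
    static unsigned long pax_delta_demo(unsigned long rand, unsigned int len)
    {
            return (rand & ((1UL << len) - 1)) << PAGE_SHIFT;
    }

    int main(void)
    {
            /* 27 - PAGE_SHIFT = 15 bits: offsets up to just under 128 MiB */
            printf("max delta: %#lx\n", pax_delta_demo(~0UL, 27 - PAGE_SHIFT));
            return 0;
    }
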
7096diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
7097index c1f6afa..38cc6e9 100644
7098--- a/arch/mips/include/asm/exec.h
7099+++ b/arch/mips/include/asm/exec.h
7100@@ -12,6 +12,6 @@
7101 #ifndef _ASM_EXEC_H
7102 #define _ASM_EXEC_H
7103
7104-extern unsigned long arch_align_stack(unsigned long sp);
7105+#define arch_align_stack(x) ((x) & ~0xfUL)
7106
7107 #endif /* _ASM_EXEC_H */
7108diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
7109index 9e8ef59..1139d6b 100644
7110--- a/arch/mips/include/asm/hw_irq.h
7111+++ b/arch/mips/include/asm/hw_irq.h
7112@@ -10,7 +10,7 @@
7113
7114 #include <linux/atomic.h>
7115
7116-extern atomic_t irq_err_count;
7117+extern atomic_unchecked_t irq_err_count;
7118
7119 /*
7120 * interrupt-retrigger: NOP for now. This may not be appropriate for all
7121diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
7122index 46dfc3c..a16b13a 100644
7123--- a/arch/mips/include/asm/local.h
7124+++ b/arch/mips/include/asm/local.h
7125@@ -12,15 +12,25 @@ typedef struct
7126 atomic_long_t a;
7127 } local_t;
7128
7129+typedef struct {
7130+ atomic_long_unchecked_t a;
7131+} local_unchecked_t;
7132+
7133 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
7134
7135 #define local_read(l) atomic_long_read(&(l)->a)
7136+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
7137 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
7138+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
7139
7140 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
7141+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
7142 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
7143+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
7144 #define local_inc(l) atomic_long_inc(&(l)->a)
7145+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
7146 #define local_dec(l) atomic_long_dec(&(l)->a)
7147+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
7148
7149 /*
7150 * Same as above, but return the result value
7151@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
7152 return result;
7153 }
7154
7155+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
7156+{
7157+ unsigned long result;
7158+
7159+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
7160+ unsigned long temp;
7161+
7162+ __asm__ __volatile__(
7163+ " .set mips3 \n"
7164+ "1:" __LL "%1, %2 # local_add_return \n"
7165+ " addu %0, %1, %3 \n"
7166+ __SC "%0, %2 \n"
7167+ " beqzl %0, 1b \n"
7168+ " addu %0, %1, %3 \n"
7169+ " .set mips0 \n"
7170+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
7171+ : "Ir" (i), "m" (l->a.counter)
7172+ : "memory");
7173+ } else if (kernel_uses_llsc) {
7174+ unsigned long temp;
7175+
7176+ __asm__ __volatile__(
7177+ " .set mips3 \n"
7178+ "1:" __LL "%1, %2 # local_add_return \n"
7179+ " addu %0, %1, %3 \n"
7180+ __SC "%0, %2 \n"
7181+ " beqz %0, 1b \n"
7182+ " addu %0, %1, %3 \n"
7183+ " .set mips0 \n"
7184+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
7185+ : "Ir" (i), "m" (l->a.counter)
7186+ : "memory");
7187+ } else {
7188+ unsigned long flags;
7189+
7190+ local_irq_save(flags);
7191+ result = l->a.counter;
7192+ result += i;
7193+ l->a.counter = result;
7194+ local_irq_restore(flags);
7195+ }
7196+
7197+ return result;
7198+}
7199+
7200 static __inline__ long local_sub_return(long i, local_t * l)
7201 {
7202 unsigned long result;
7203@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
7204
7205 #define local_cmpxchg(l, o, n) \
7206 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
7207+#define local_cmpxchg_unchecked(l, o, n) \
7208+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
7209 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
7210
7211 /**
7212diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
7213index 5699ec3..95def83 100644
7214--- a/arch/mips/include/asm/page.h
7215+++ b/arch/mips/include/asm/page.h
7216@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
7217 #ifdef CONFIG_CPU_MIPS32
7218 typedef struct { unsigned long pte_low, pte_high; } pte_t;
7219 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
7220- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
7221+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
7222 #else
7223 typedef struct { unsigned long long pte; } pte_t;
7224 #define pte_val(x) ((x).pte)
7225diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
7226index b336037..5b874cc 100644
7227--- a/arch/mips/include/asm/pgalloc.h
7228+++ b/arch/mips/include/asm/pgalloc.h
7229@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7230 {
7231 set_pud(pud, __pud((unsigned long)pmd));
7232 }
7233+
7234+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7235+{
7236+ pud_populate(mm, pud, pmd);
7237+}
7238 #endif
7239
7240 /*
7241diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
7242index 539ddd1..8783f9a 100644
7243--- a/arch/mips/include/asm/pgtable.h
7244+++ b/arch/mips/include/asm/pgtable.h
7245@@ -20,6 +20,9 @@
7246 #include <asm/io.h>
7247 #include <asm/pgtable-bits.h>
7248
7249+#define ktla_ktva(addr) (addr)
7250+#define ktva_ktla(addr) (addr)
7251+
7252 struct mm_struct;
7253 struct vm_area_struct;
7254
7255diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
7256index 7de8658..c109224 100644
7257--- a/arch/mips/include/asm/thread_info.h
7258+++ b/arch/mips/include/asm/thread_info.h
7259@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
7260 #define TIF_SECCOMP 4 /* secure computing */
7261 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
7262 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
7263+/* li takes a 32-bit immediate */
7264+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
7265+
7266 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
7267 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
7268 #define TIF_NOHZ 19 /* in adaptive nohz mode */
7269@@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void)
7270 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
7271 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
7272 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7273+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7274
7275 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
7276 _TIF_SYSCALL_AUDIT | \
7277- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
7278+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
7279+ _TIF_GRSEC_SETXID)
7280
7281 /* work to do in syscall_trace_leave() */
7282 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
7283- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
7284+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7285
7286 /* work to do on interrupt/exception return */
7287 #define _TIF_WORK_MASK \
7288@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
7289 /* work to do on any return to u-space */
7290 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
7291 _TIF_WORK_SYSCALL_EXIT | \
7292- _TIF_SYSCALL_TRACEPOINT)
7293+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7294
7295 /*
7296 * We stash processor id into a COP0 register to retrieve it fast
7297diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
7298index a109510..94ee3f6 100644
7299--- a/arch/mips/include/asm/uaccess.h
7300+++ b/arch/mips/include/asm/uaccess.h
7301@@ -130,6 +130,7 @@ extern u64 __ua_limit;
7302 __ok == 0; \
7303 })
7304
7305+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
7306 #define access_ok(type, addr, size) \
7307 likely(__access_ok((addr), (size), __access_mask))
7308
7309diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
7310index 1188e00..41cf144 100644
7311--- a/arch/mips/kernel/binfmt_elfn32.c
7312+++ b/arch/mips/kernel/binfmt_elfn32.c
7313@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7314 #undef ELF_ET_DYN_BASE
7315 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7316
7317+#ifdef CONFIG_PAX_ASLR
7318+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7319+
7320+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7321+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7322+#endif
7323+
7324 #include <asm/processor.h>
7325 #include <linux/module.h>
7326 #include <linux/elfcore.h>
7327diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
7328index 7faf5f2..f3d3cf4 100644
7329--- a/arch/mips/kernel/binfmt_elfo32.c
7330+++ b/arch/mips/kernel/binfmt_elfo32.c
7331@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7332 #undef ELF_ET_DYN_BASE
7333 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7334
7335+#ifdef CONFIG_PAX_ASLR
7336+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7337+
7338+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7339+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7340+#endif
7341+
7342 #include <asm/processor.h>
7343
7344 /*
7345diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
7346index 50b3648..c2f3cec 100644
7347--- a/arch/mips/kernel/i8259.c
7348+++ b/arch/mips/kernel/i8259.c
7349@@ -201,7 +201,7 @@ spurious_8259A_irq:
7350 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
7351 spurious_irq_mask |= irqmask;
7352 }
7353- atomic_inc(&irq_err_count);
7354+ atomic_inc_unchecked(&irq_err_count);
7355 /*
7356 * Theoretically we do not have to handle this IRQ,
7357 * but in Linux this does not cause problems and is
7358diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
7359index 44a1f79..2bd6aa3 100644
7360--- a/arch/mips/kernel/irq-gt641xx.c
7361+++ b/arch/mips/kernel/irq-gt641xx.c
7362@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
7363 }
7364 }
7365
7366- atomic_inc(&irq_err_count);
7367+ atomic_inc_unchecked(&irq_err_count);
7368 }
7369
7370 void __init gt641xx_irq_init(void)
7371diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
7372index d2bfbc2..a8eacd2 100644
7373--- a/arch/mips/kernel/irq.c
7374+++ b/arch/mips/kernel/irq.c
7375@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
7376 printk("unexpected IRQ # %d\n", irq);
7377 }
7378
7379-atomic_t irq_err_count;
7380+atomic_unchecked_t irq_err_count;
7381
7382 int arch_show_interrupts(struct seq_file *p, int prec)
7383 {
7384- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
7385+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
7386 return 0;
7387 }
7388
7389 asmlinkage void spurious_interrupt(void)
7390 {
7391- atomic_inc(&irq_err_count);
7392+ atomic_inc_unchecked(&irq_err_count);
7393 }
7394
7395 void __init init_IRQ(void)
7396@@ -109,7 +109,10 @@ void __init init_IRQ(void)
7397 #endif
7398 }
7399
7400+
7401 #ifdef DEBUG_STACKOVERFLOW
7402+extern void gr_handle_kernel_exploit(void);
7403+
7404 static inline void check_stack_overflow(void)
7405 {
7406 unsigned long sp;
7407@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
7408 printk("do_IRQ: stack overflow: %ld\n",
7409 sp - sizeof(struct thread_info));
7410 dump_stack();
7411+ gr_handle_kernel_exploit();
7412 }
7413 }
7414 #else
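
irq_err_count only feeds the ERR line of /proc/interrupts, so letting it wrap is harmless; moving it to atomic_unchecked_t keeps a wrapped error counter from tripping the REFCOUNT overflow trap, and the same conversion repeats in the i8259, gt641xx, rm200 and vr41xx hunks. The companion type is added to include/linux/types.h elsewhere in this patch (quoted from memory):

    typedef struct {
            int counter;
    } atomic_unchecked_t;

The gr_handle_kernel_exploit() hook declared in the same hunk escalates a detected kernel stack overflow into grsecurity's exploit response; its body lives with the grsecurity core later in the patch.
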
7415diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
7416index c4c2069..bde8051 100644
7417--- a/arch/mips/kernel/pm-cps.c
7418+++ b/arch/mips/kernel/pm-cps.c
7419@@ -168,7 +168,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
7420 nc_core_ready_count = nc_addr;
7421
7422 /* Ensure ready_count is zero-initialised before the assembly runs */
7423- ACCESS_ONCE(*nc_core_ready_count) = 0;
7424+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
7425 coupled_barrier(&per_cpu(pm_barrier, core), online);
7426
7427 /* Run the generated entry code */
7428diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
7429index 0a1ec0f..d9e93b6 100644
7430--- a/arch/mips/kernel/process.c
7431+++ b/arch/mips/kernel/process.c
7432@@ -572,15 +572,3 @@ unsigned long get_wchan(struct task_struct *task)
7433 out:
7434 return pc;
7435 }
7436-
7437-/*
7438- * Don't forget that the stack pointer must be aligned on a 8 bytes
7439- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
7440- */
7441-unsigned long arch_align_stack(unsigned long sp)
7442-{
7443- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7444- sp -= get_random_int() & ~PAGE_MASK;
7445-
7446- return sp & ALMASK;
7447-}
7448diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7449index f639ccd..e4b110d 100644
7450--- a/arch/mips/kernel/ptrace.c
7451+++ b/arch/mips/kernel/ptrace.c
7452@@ -630,6 +630,10 @@ long arch_ptrace(struct task_struct *child, long request,
7453 return ret;
7454 }
7455
7456+#ifdef CONFIG_GRKERNSEC_SETXID
7457+extern void gr_delayed_cred_worker(void);
7458+#endif
7459+
7460 /*
7461 * Notification of system call entry/exit
7462 * - triggered by current->work.syscall_trace
7463@@ -646,6 +650,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7464 tracehook_report_syscall_entry(regs))
7465 ret = -1;
7466
7467+#ifdef CONFIG_GRKERNSEC_SETXID
7468+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7469+ gr_delayed_cred_worker();
7470+#endif
7471+
7472 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7473 trace_sys_enter(regs, regs->regs[2]);
7474
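
GRKERNSEC_SETXID closes a race in multithreaded set*id(): the thread that changes credentials flags its siblings with TIF_GRSEC_SETXID, the flag is folded into the syscall entry/exit work masks (see the thread_info.h hunk above), and each sibling applies the new credentials at its next syscall boundary through gr_delayed_cred_worker(). The fragment below is a schematic of the flag-deferred-work pattern only, not grsecurity's actual code; every demo_* name is invented.

    /* per-thread deferred work keyed off a TIF-style flag */
    struct demo_thread {
            unsigned long flags;
            int uid, pending_uid;
    };

    #define DEMO_TIF_SETXID (1UL << 10)

    void demo_syscall_entry(struct demo_thread *t)
    {
            if (t->flags & DEMO_TIF_SETXID) {
                    t->flags &= ~DEMO_TIF_SETXID;   /* test-and-clear */
                    t->uid = t->pending_uid;        /* apply deferred creds */
            }
    }
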
7475diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
7476index 07fc524..b9d7f28 100644
7477--- a/arch/mips/kernel/reset.c
7478+++ b/arch/mips/kernel/reset.c
7479@@ -13,6 +13,7 @@
7480 #include <linux/reboot.h>
7481
7482 #include <asm/reboot.h>
7483+#include <asm/bug.h>
7484
7485 /*
7486 * Urgs ... Too many MIPS machines to handle this in a generic way.
7487@@ -29,16 +30,19 @@ void machine_restart(char *command)
7488 {
7489 if (_machine_restart)
7490 _machine_restart(command);
7491+ BUG();
7492 }
7493
7494 void machine_halt(void)
7495 {
7496 if (_machine_halt)
7497 _machine_halt();
7498+ BUG();
7499 }
7500
7501 void machine_power_off(void)
7502 {
7503 if (pm_power_off)
7504 pm_power_off();
7505+ BUG();
7506 }
7507diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7508index 2242bdd..b284048 100644
7509--- a/arch/mips/kernel/sync-r4k.c
7510+++ b/arch/mips/kernel/sync-r4k.c
7511@@ -18,8 +18,8 @@
7512 #include <asm/mipsregs.h>
7513
7514 static atomic_t count_start_flag = ATOMIC_INIT(0);
7515-static atomic_t count_count_start = ATOMIC_INIT(0);
7516-static atomic_t count_count_stop = ATOMIC_INIT(0);
7517+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7518+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7519 static atomic_t count_reference = ATOMIC_INIT(0);
7520
7521 #define COUNTON 100
7522@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7523
7524 for (i = 0; i < NR_LOOPS; i++) {
7525 /* slaves loop on '!= 2' */
7526- while (atomic_read(&count_count_start) != 1)
7527+ while (atomic_read_unchecked(&count_count_start) != 1)
7528 mb();
7529- atomic_set(&count_count_stop, 0);
7530+ atomic_set_unchecked(&count_count_stop, 0);
7531 smp_wmb();
7532
7533 /* this lets the slaves write their count register */
7534- atomic_inc(&count_count_start);
7535+ atomic_inc_unchecked(&count_count_start);
7536
7537 /*
7538 * Everyone initialises count in the last loop:
7539@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7540 /*
7541 * Wait for all slaves to leave the synchronization point:
7542 */
7543- while (atomic_read(&count_count_stop) != 1)
7544+ while (atomic_read_unchecked(&count_count_stop) != 1)
7545 mb();
7546- atomic_set(&count_count_start, 0);
7547+ atomic_set_unchecked(&count_count_start, 0);
7548 smp_wmb();
7549- atomic_inc(&count_count_stop);
7550+ atomic_inc_unchecked(&count_count_stop);
7551 }
7552 /* Arrange for an interrupt in a short while */
7553 write_c0_compare(read_c0_count() + COUNTON);
7554@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7555 initcount = atomic_read(&count_reference);
7556
7557 for (i = 0; i < NR_LOOPS; i++) {
7558- atomic_inc(&count_count_start);
7559- while (atomic_read(&count_count_start) != 2)
7560+ atomic_inc_unchecked(&count_count_start);
7561+ while (atomic_read_unchecked(&count_count_start) != 2)
7562 mb();
7563
7564 /*
7565@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7566 if (i == NR_LOOPS-1)
7567 write_c0_count(initcount);
7568
7569- atomic_inc(&count_count_stop);
7570- while (atomic_read(&count_count_stop) != 2)
7571+ atomic_inc_unchecked(&count_count_stop);
7572+ while (atomic_read_unchecked(&count_count_stop) != 2)
7573 mb();
7574 }
7575 /* Arrange for an interrupt in a short while */
7576diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7577index 51706d6..ec1178c 100644
7578--- a/arch/mips/kernel/traps.c
7579+++ b/arch/mips/kernel/traps.c
7580@@ -687,7 +687,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7581 siginfo_t info;
7582
7583 prev_state = exception_enter();
7584- die_if_kernel("Integer overflow", regs);
7585+ if (unlikely(!user_mode(regs))) {
7586+
7587+#ifdef CONFIG_PAX_REFCOUNT
7588+ if (fixup_exception(regs)) {
7589+ pax_report_refcount_overflow(regs);
7590+ exception_exit(prev_state);
7591+ return;
7592+ }
7593+#endif
7594+
7595+ die("Integer overflow", regs);
7596+ }
7597
7598 info.si_code = FPE_INTOVF;
7599 info.si_signo = SIGFPE;
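
This do_ov() change is the landing site for the trapping dadd/dsub instructions in the atomic hunks above: an integer-overflow exception taken in kernel mode is first matched against the exception table, and when the faulting instruction registered a fixup via _ASM_EXTABLE(2b, 3b), pax_report_refcount_overflow() logs the event and execution resumes at the fixup label instead of wrapping the counter or dying. The recovery step, reduced to stand-in types (fixup_exception() is the real helper; the rest is illustrative):

    struct demo_regs { unsigned long cp0_epc; };
    struct demo_extable { unsigned long insn, nextinsn; };

    int demo_fixup_exception(struct demo_regs *regs,
                             const struct demo_extable *fix)
    {
            if (!fix || fix->insn != regs->cp0_epc)
                    return 0;
            regs->cp0_epc = fix->nextinsn;  /* resume at the "3:" label */
            return 1;
    }
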
7600diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7601index becc42b..9e43d4b 100644
7602--- a/arch/mips/mm/fault.c
7603+++ b/arch/mips/mm/fault.c
7604@@ -28,6 +28,23 @@
7605 #include <asm/highmem.h> /* For VMALLOC_END */
7606 #include <linux/kdebug.h>
7607
7608+#ifdef CONFIG_PAX_PAGEEXEC
7609+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7610+{
7611+ unsigned long i;
7612+
7613+ printk(KERN_ERR "PAX: bytes at PC: ");
7614+ for (i = 0; i < 5; i++) {
7615+ unsigned int c;
7616+ if (get_user(c, (unsigned int *)pc+i))
7617+ printk(KERN_CONT "???????? ");
7618+ else
7619+ printk(KERN_CONT "%08x ", c);
7620+ }
7621+ printk("\n");
7622+}
7623+#endif
7624+
7625 /*
7626 * This routine handles page faults. It determines the address,
7627 * and the problem, and then passes it off to one of the appropriate
7628@@ -199,6 +216,14 @@ bad_area:
7629 bad_area_nosemaphore:
7630 /* User mode accesses just cause a SIGSEGV */
7631 if (user_mode(regs)) {
7632+
7633+#ifdef CONFIG_PAX_PAGEEXEC
7634+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7635+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7636+ do_group_exit(SIGKILL);
7637+ }
7638+#endif
7639+
7640 tsk->thread.cp0_badvaddr = address;
7641 tsk->thread.error_code = write;
7642 #if 0
7643diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7644index f1baadd..5472dca 100644
7645--- a/arch/mips/mm/mmap.c
7646+++ b/arch/mips/mm/mmap.c
7647@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7648 struct vm_area_struct *vma;
7649 unsigned long addr = addr0;
7650 int do_color_align;
7651+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7652 struct vm_unmapped_area_info info;
7653
7654 if (unlikely(len > TASK_SIZE))
7655@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7656 do_color_align = 1;
7657
7658 /* requesting a specific address */
7659+
7660+#ifdef CONFIG_PAX_RANDMMAP
7661+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7662+#endif
7663+
7664 if (addr) {
7665 if (do_color_align)
7666 addr = COLOUR_ALIGN(addr, pgoff);
7667@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7668 addr = PAGE_ALIGN(addr);
7669
7670 vma = find_vma(mm, addr);
7671- if (TASK_SIZE - len >= addr &&
7672- (!vma || addr + len <= vma->vm_start))
7673+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7674 return addr;
7675 }
7676
7677 info.length = len;
7678 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7679 info.align_offset = pgoff << PAGE_SHIFT;
7680+ info.threadstack_offset = offset;
7681
7682 if (dir == DOWN) {
7683 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7684@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7685 {
7686 unsigned long random_factor = 0UL;
7687
7688+#ifdef CONFIG_PAX_RANDMMAP
7689+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7690+#endif
7691+
7692 if (current->flags & PF_RANDOMIZE) {
7693 random_factor = get_random_int();
7694 random_factor = random_factor << PAGE_SHIFT;
7695@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7696
7697 if (mmap_is_legacy()) {
7698 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7699+
7700+#ifdef CONFIG_PAX_RANDMMAP
7701+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7702+ mm->mmap_base += mm->delta_mmap;
7703+#endif
7704+
7705 mm->get_unmapped_area = arch_get_unmapped_area;
7706 } else {
7707 mm->mmap_base = mmap_base(random_factor);
7708+
7709+#ifdef CONFIG_PAX_RANDMMAP
7710+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7711+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7712+#endif
7713+
7714 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7715 }
7716 }
7717
7718-static inline unsigned long brk_rnd(void)
7719-{
7720- unsigned long rnd = get_random_int();
7721-
7722- rnd = rnd << PAGE_SHIFT;
7723- /* 8MB for 32bit, 256MB for 64bit */
7724- if (TASK_IS_32BIT_ADDR)
7725- rnd = rnd & 0x7ffffful;
7726- else
7727- rnd = rnd & 0xffffffful;
7728-
7729- return rnd;
7730-}
7731-
7732-unsigned long arch_randomize_brk(struct mm_struct *mm)
7733-{
7734- unsigned long base = mm->brk;
7735- unsigned long ret;
7736-
7737- ret = PAGE_ALIGN(base + brk_rnd());
7738-
7739- if (ret < mm->brk)
7740- return mm->brk;
7741-
7742- return ret;
7743-}
7744-
7745 int __virt_addr_valid(const volatile void *kaddr)
7746 {
7747 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
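
With RANDMMAP active, the per-mm deltas shift the mmap base in opposite directions for the two layouts: up from TASK_UNMAPPED_BASE in the legacy bottom-up case, and down, away from the stack gap, in the top-down case. A worked example with invented values:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            /* all values below are assumptions for illustration */
            unsigned long task_unmapped_base = 0x08000000UL;
            unsigned long topdown_base       = 0x7f000000UL;
            unsigned long delta_mmap  = 0x1a3UL << PAGE_SHIFT;
            unsigned long delta_stack = 0x02fUL << PAGE_SHIFT;

            printf("legacy:  %#lx\n", task_unmapped_base + delta_mmap);
            printf("topdown: %#lx\n",
                   topdown_base - (delta_mmap + delta_stack));
            return 0;
    }
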
7748diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7749index 59cccd9..f39ac2f 100644
7750--- a/arch/mips/pci/pci-octeon.c
7751+++ b/arch/mips/pci/pci-octeon.c
7752@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7753
7754
7755 static struct pci_ops octeon_pci_ops = {
7756- octeon_read_config,
7757- octeon_write_config,
7758+ .read = octeon_read_config,
7759+ .write = octeon_write_config,
7760 };
7761
7762 static struct resource octeon_pci_mem_resource = {
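
Converting the pci_ops initializers from positional to designated form is groundwork for the structure-layout randomization elsewhere in this patch: once a plugin may reorder fields at build time, positional initializers silently bind to the wrong members, while designated ones stay correct under any layout. A standalone illustration (names invented):

    struct demo_ops {
            int (*read)(void);
            int (*write)(void);
    };

    static int demo_read(void)  { return 0; }
    static int demo_write(void) { return 1; }

    /* robust against field reordering; a positional
     * { demo_read, demo_write } would not be */
    static struct demo_ops ops = {
            .read  = demo_read,
            .write = demo_write,
    };

The pcie-octeon.c hunks below apply the same conversion to three more pci_ops instances.
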
7763diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7764index 5e36c33..eb4a17b 100644
7765--- a/arch/mips/pci/pcie-octeon.c
7766+++ b/arch/mips/pci/pcie-octeon.c
7767@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7768 }
7769
7770 static struct pci_ops octeon_pcie0_ops = {
7771- octeon_pcie0_read_config,
7772- octeon_pcie0_write_config,
7773+ .read = octeon_pcie0_read_config,
7774+ .write = octeon_pcie0_write_config,
7775 };
7776
7777 static struct resource octeon_pcie0_mem_resource = {
7778@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7779 };
7780
7781 static struct pci_ops octeon_pcie1_ops = {
7782- octeon_pcie1_read_config,
7783- octeon_pcie1_write_config,
7784+ .read = octeon_pcie1_read_config,
7785+ .write = octeon_pcie1_write_config,
7786 };
7787
7788 static struct resource octeon_pcie1_mem_resource = {
7789@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7790 };
7791
7792 static struct pci_ops octeon_dummy_ops = {
7793- octeon_dummy_read_config,
7794- octeon_dummy_write_config,
7795+ .read = octeon_dummy_read_config,
7796+ .write = octeon_dummy_write_config,
7797 };
7798
7799 static struct resource octeon_dummy_mem_resource = {
7800diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7801index a2358b4..7cead4f 100644
7802--- a/arch/mips/sgi-ip27/ip27-nmi.c
7803+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7804@@ -187,9 +187,9 @@ void
7805 cont_nmi_dump(void)
7806 {
7807 #ifndef REAL_NMI_SIGNAL
7808- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7809+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7810
7811- atomic_inc(&nmied_cpus);
7812+ atomic_inc_unchecked(&nmied_cpus);
7813 #endif
7814 /*
7815 * Only allow 1 cpu to proceed
7816@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7817 udelay(10000);
7818 }
7819 #else
7820- while (atomic_read(&nmied_cpus) != num_online_cpus());
7821+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7822 #endif
7823
7824 /*
7825diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7826index a046b30..6799527 100644
7827--- a/arch/mips/sni/rm200.c
7828+++ b/arch/mips/sni/rm200.c
7829@@ -270,7 +270,7 @@ spurious_8259A_irq:
7830 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7831 spurious_irq_mask |= irqmask;
7832 }
7833- atomic_inc(&irq_err_count);
7834+ atomic_inc_unchecked(&irq_err_count);
7835 /*
7836 * Theoretically we do not have to handle this IRQ,
7837 * but in Linux this does not cause problems and is
7838diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7839index 41e873b..34d33a7 100644
7840--- a/arch/mips/vr41xx/common/icu.c
7841+++ b/arch/mips/vr41xx/common/icu.c
7842@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7843
7844 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7845
7846- atomic_inc(&irq_err_count);
7847+ atomic_inc_unchecked(&irq_err_count);
7848
7849 return -1;
7850 }
7851diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7852index ae0e4ee..e8f0692 100644
7853--- a/arch/mips/vr41xx/common/irq.c
7854+++ b/arch/mips/vr41xx/common/irq.c
7855@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7856 irq_cascade_t *cascade;
7857
7858 if (irq >= NR_IRQS) {
7859- atomic_inc(&irq_err_count);
7860+ atomic_inc_unchecked(&irq_err_count);
7861 return;
7862 }
7863
7864@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7865 ret = cascade->get_irq(irq);
7866 irq = ret;
7867 if (ret < 0)
7868- atomic_inc(&irq_err_count);
7869+ atomic_inc_unchecked(&irq_err_count);
7870 else
7871 irq_dispatch(irq);
7872 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7873diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7874index 967d144..db12197 100644
7875--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7876+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7877@@ -11,12 +11,14 @@
7878 #ifndef _ASM_PROC_CACHE_H
7879 #define _ASM_PROC_CACHE_H
7880
7881+#include <linux/const.h>
7882+
7883 /* L1 cache */
7884
7885 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7886 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7887-#define L1_CACHE_BYTES 16 /* bytes per entry */
7888 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7889+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7890 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7891
7892 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7893diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7894index bcb5df2..84fabd2 100644
7895--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7896+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7897@@ -16,13 +16,15 @@
7898 #ifndef _ASM_PROC_CACHE_H
7899 #define _ASM_PROC_CACHE_H
7900
7901+#include <linux/const.h>
7902+
7903 /*
7904 * L1 cache
7905 */
7906 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7907 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7908-#define L1_CACHE_BYTES 32 /* bytes per entry */
7909 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7910+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7911 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7912
7913 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7914diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7915index 4ce7a01..449202a 100644
7916--- a/arch/openrisc/include/asm/cache.h
7917+++ b/arch/openrisc/include/asm/cache.h
7918@@ -19,11 +19,13 @@
7919 #ifndef __ASM_OPENRISC_CACHE_H
7920 #define __ASM_OPENRISC_CACHE_H
7921
7922+#include <linux/const.h>
7923+
7924 /* FIXME: How can we replace these with values from the CPU...
7925 * they shouldn't be hard-coded!
7926 */
7927
7928-#define L1_CACHE_BYTES 16
7929 #define L1_CACHE_SHIFT 4
7930+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7931
7932 #endif /* __ASM_OPENRISC_CACHE_H */
7933diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7934index 0be2db2..1b0f26d 100644
7935--- a/arch/parisc/include/asm/atomic.h
7936+++ b/arch/parisc/include/asm/atomic.h
7937@@ -248,6 +248,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7938 return dec;
7939 }
7940
7941+#define atomic64_read_unchecked(v) atomic64_read(v)
7942+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7943+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7944+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7945+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7946+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7947+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7948+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7949+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7950+
7951 #endif /* !CONFIG_64BIT */
7952
7953
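
On configurations without a trapping implementation, here the generic 32-bit atomic64 path on parisc, the unchecked API simply aliases the checked one, so arch-independent callers still compile and overflow checking quietly becomes a no-op. In this header's context a caller looks the same either way (illustrative):

    /* may wrap; that is the point of the unchecked API */
    static inline long demo_bump(atomic64_unchecked_t *stat)
    {
            return atomic64_add_return_unchecked(1, stat);
    }

The powerpc atomic.h hunk later in this section adds the identical block of aliases.
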
7954diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7955index 47f11c7..3420df2 100644
7956--- a/arch/parisc/include/asm/cache.h
7957+++ b/arch/parisc/include/asm/cache.h
7958@@ -5,6 +5,7 @@
7959 #ifndef __ARCH_PARISC_CACHE_H
7960 #define __ARCH_PARISC_CACHE_H
7961
7962+#include <linux/const.h>
7963
7964 /*
7965 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7966@@ -15,13 +16,13 @@
7967 * just ruin performance.
7968 */
7969 #ifdef CONFIG_PA20
7970-#define L1_CACHE_BYTES 64
7971 #define L1_CACHE_SHIFT 6
7972 #else
7973-#define L1_CACHE_BYTES 32
7974 #define L1_CACHE_SHIFT 5
7975 #endif
7976
7977+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7978+
7979 #ifndef __ASSEMBLY__
7980
7981 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7982diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7983index 3391d06..c23a2cc 100644
7984--- a/arch/parisc/include/asm/elf.h
7985+++ b/arch/parisc/include/asm/elf.h
7986@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7987
7988 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7989
7990+#ifdef CONFIG_PAX_ASLR
7991+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7992+
7993+#define PAX_DELTA_MMAP_LEN 16
7994+#define PAX_DELTA_STACK_LEN 16
7995+#endif
7996+
7997 /* This yields a mask that user programs can use to figure out what
7998 instruction set this CPU supports. This could be done in user space,
7999 but it's not easy, and we've already done it here. */
8000diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
8001index f213f5b..0af3e8e 100644
8002--- a/arch/parisc/include/asm/pgalloc.h
8003+++ b/arch/parisc/include/asm/pgalloc.h
8004@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
8005 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
8006 }
8007
8008+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
8009+{
8010+ pgd_populate(mm, pgd, pmd);
8011+}
8012+
8013 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
8014 {
8015 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
8016@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
8017 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
8018 #define pmd_free(mm, x) do { } while (0)
8019 #define pgd_populate(mm, pmd, pte) BUG()
8020+#define pgd_populate_kernel(mm, pmd, pte) BUG()
8021
8022 #endif
8023
8024diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
8025index 22b89d1..ce34230 100644
8026--- a/arch/parisc/include/asm/pgtable.h
8027+++ b/arch/parisc/include/asm/pgtable.h
8028@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
8029 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
8030 #define PAGE_COPY PAGE_EXECREAD
8031 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
8032+
8033+#ifdef CONFIG_PAX_PAGEEXEC
8034+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
8035+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
8036+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
8037+#else
8038+# define PAGE_SHARED_NOEXEC PAGE_SHARED
8039+# define PAGE_COPY_NOEXEC PAGE_COPY
8040+# define PAGE_READONLY_NOEXEC PAGE_READONLY
8041+#endif
8042+
8043 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
8044 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
8045 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
8046diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
8047index 4006964..fcb3cc2 100644
8048--- a/arch/parisc/include/asm/uaccess.h
8049+++ b/arch/parisc/include/asm/uaccess.h
8050@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
8051 const void __user *from,
8052 unsigned long n)
8053 {
8054- int sz = __compiletime_object_size(to);
8055+ size_t sz = __compiletime_object_size(to);
8056 int ret = -EFAULT;
8057
8058- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
8059+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
8060 ret = __copy_from_user(to, from, n);
8061 else
8062 copy_from_user_overflow();
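
Widening sz from int to size_t matches __compiletime_object_size(), which yields (size_t)-1 when the object size is unknown, so the bounds comparison now runs in unsigned arithmetic end to end. A standalone look at the underlying builtin; noinline keeps the size opaque across the call, and the commented results assume an optimizing build:

    #include <stdio.h>

    __attribute__((noinline))
    static size_t probe(const char *p)
    {
            return __builtin_object_size(p, 0);
    }

    int main(void)
    {
            char buf[16];

            printf("%zu\n", __builtin_object_size(buf, 0)); /* 16 */
            printf("%zu\n", probe(buf));    /* SIZE_MAX: unknown here */
            return 0;
    }
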
8063diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
8064index 50dfafc..b9fc230 100644
8065--- a/arch/parisc/kernel/module.c
8066+++ b/arch/parisc/kernel/module.c
8067@@ -98,16 +98,38 @@
8068
8069 /* three functions to determine where in the module core
8070 * or init pieces the location is */
8071+static inline int in_init_rx(struct module *me, void *loc)
8072+{
8073+ return (loc >= me->module_init_rx &&
8074+ loc < (me->module_init_rx + me->init_size_rx));
8075+}
8076+
8077+static inline int in_init_rw(struct module *me, void *loc)
8078+{
8079+ return (loc >= me->module_init_rw &&
8080+ loc < (me->module_init_rw + me->init_size_rw));
8081+}
8082+
8083 static inline int in_init(struct module *me, void *loc)
8084 {
8085- return (loc >= me->module_init &&
8086- loc <= (me->module_init + me->init_size));
8087+ return in_init_rx(me, loc) || in_init_rw(me, loc);
8088+}
8089+
8090+static inline int in_core_rx(struct module *me, void *loc)
8091+{
8092+ return (loc >= me->module_core_rx &&
8093+ loc < (me->module_core_rx + me->core_size_rx));
8094+}
8095+
8096+static inline int in_core_rw(struct module *me, void *loc)
8097+{
8098+ return (loc >= me->module_core_rw &&
8099+ loc < (me->module_core_rw + me->core_size_rw));
8100 }
8101
8102 static inline int in_core(struct module *me, void *loc)
8103 {
8104- return (loc >= me->module_core &&
8105- loc <= (me->module_core + me->core_size));
8106+ return in_core_rx(me, loc) || in_core_rw(me, loc);
8107 }
8108
8109 static inline int in_local(struct module *me, void *loc)
8110@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
8111 }
8112
8113 /* align things a bit */
8114- me->core_size = ALIGN(me->core_size, 16);
8115- me->arch.got_offset = me->core_size;
8116- me->core_size += gots * sizeof(struct got_entry);
8117+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
8118+ me->arch.got_offset = me->core_size_rw;
8119+ me->core_size_rw += gots * sizeof(struct got_entry);
8120
8121- me->core_size = ALIGN(me->core_size, 16);
8122- me->arch.fdesc_offset = me->core_size;
8123- me->core_size += fdescs * sizeof(Elf_Fdesc);
8124+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
8125+ me->arch.fdesc_offset = me->core_size_rw;
8126+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
8127
8128 me->arch.got_max = gots;
8129 me->arch.fdesc_max = fdescs;
8130@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8131
8132 BUG_ON(value == 0);
8133
8134- got = me->module_core + me->arch.got_offset;
8135+ got = me->module_core_rw + me->arch.got_offset;
8136 for (i = 0; got[i].addr; i++)
8137 if (got[i].addr == value)
8138 goto out;
8139@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8140 #ifdef CONFIG_64BIT
8141 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8142 {
8143- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
8144+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
8145
8146 if (!value) {
8147 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
8148@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8149
8150 /* Create new one */
8151 fdesc->addr = value;
8152- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
8153+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
8154 return (Elf_Addr)fdesc;
8155 }
8156 #endif /* CONFIG_64BIT */
8157@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
8158
8159 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
8160 end = table + sechdrs[me->arch.unwind_section].sh_size;
8161- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
8162+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
8163
8164 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
8165 me->arch.unwind_section, table, end, gp);
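
Splitting module_core into module_core_rx (code and GOT stubs, mapped read-execute) and module_core_rw (data, GOT, function descriptors, mapped read-write) extends the W^X policy to module memory. Note that the new membership helpers also tighten the old bound: in_init() and in_core() used to accept loc <= base + size, one byte past the region, while the rx/rw helpers test the half-open range with <. The test, in isolation:

    #include <stdbool.h>

    /* half-open [base, base + size): one-past-the-end is outside */
    static bool demo_in_range(const void *loc, const void *base,
                              unsigned long size)
    {
            return (const char *)loc >= (const char *)base &&
                   (const char *)loc < (const char *)base + size;
    }
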
8166diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
8167index e1ffea2..46ed66e 100644
8168--- a/arch/parisc/kernel/sys_parisc.c
8169+++ b/arch/parisc/kernel/sys_parisc.c
8170@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8171 unsigned long task_size = TASK_SIZE;
8172 int do_color_align, last_mmap;
8173 struct vm_unmapped_area_info info;
8174+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8175
8176 if (len > task_size)
8177 return -ENOMEM;
8178@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8179 goto found_addr;
8180 }
8181
8182+#ifdef CONFIG_PAX_RANDMMAP
8183+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8184+#endif
8185+
8186 if (addr) {
8187 if (do_color_align && last_mmap)
8188 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
8189@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8190 info.high_limit = mmap_upper_limit();
8191 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
8192 info.align_offset = shared_align_offset(last_mmap, pgoff);
8193+ info.threadstack_offset = offset;
8194 addr = vm_unmapped_area(&info);
8195
8196 found_addr:
8197@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8198 unsigned long addr = addr0;
8199 int do_color_align, last_mmap;
8200 struct vm_unmapped_area_info info;
8201+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8202
8203 #ifdef CONFIG_64BIT
8204 /* This should only ever run for 32-bit processes. */
8205@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8206 }
8207
8208 /* requesting a specific address */
8209+#ifdef CONFIG_PAX_RANDMMAP
8210+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8211+#endif
8212+
8213 if (addr) {
8214 if (do_color_align && last_mmap)
8215 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
8216@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8217 info.high_limit = mm->mmap_base;
8218 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
8219 info.align_offset = shared_align_offset(last_mmap, pgoff);
8220+ info.threadstack_offset = offset;
8221 addr = vm_unmapped_area(&info);
8222 if (!(addr & ~PAGE_MASK))
8223 goto found_addr;
8224@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8225 mm->mmap_legacy_base = mmap_legacy_base();
8226 mm->mmap_base = mmap_upper_limit();
8227
8228+#ifdef CONFIG_PAX_RANDMMAP
8229+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
8230+ mm->mmap_legacy_base += mm->delta_mmap;
8231+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8232+ }
8233+#endif
8234+
8235 if (mmap_is_legacy()) {
8236 mm->mmap_base = mm->mmap_legacy_base;
8237 mm->get_unmapped_area = arch_get_unmapped_area;
8238diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
8239index 47ee620..1107387 100644
8240--- a/arch/parisc/kernel/traps.c
8241+++ b/arch/parisc/kernel/traps.c
8242@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
8243
8244 down_read(&current->mm->mmap_sem);
8245 vma = find_vma(current->mm,regs->iaoq[0]);
8246- if (vma && (regs->iaoq[0] >= vma->vm_start)
8247- && (vma->vm_flags & VM_EXEC)) {
8248-
8249+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
8250 fault_address = regs->iaoq[0];
8251 fault_space = regs->iasq[0];
8252
8253diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
8254index 3ca9c11..d163ef7 100644
8255--- a/arch/parisc/mm/fault.c
8256+++ b/arch/parisc/mm/fault.c
8257@@ -15,6 +15,7 @@
8258 #include <linux/sched.h>
8259 #include <linux/interrupt.h>
8260 #include <linux/module.h>
8261+#include <linux/unistd.h>
8262
8263 #include <asm/uaccess.h>
8264 #include <asm/traps.h>
8265@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
8266 static unsigned long
8267 parisc_acctyp(unsigned long code, unsigned int inst)
8268 {
8269- if (code == 6 || code == 16)
8270+ if (code == 6 || code == 7 || code == 16)
8271 return VM_EXEC;
8272
8273 switch (inst & 0xf0000000) {
8274@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
8275 }
8276 #endif
8277
8278+#ifdef CONFIG_PAX_PAGEEXEC
8279+/*
8280+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
8281+ *
8282+ * returns 1 when task should be killed
8283+ * 2 when rt_sigreturn trampoline was detected
8284+ * 3 when unpatched PLT trampoline was detected
8285+ */
8286+static int pax_handle_fetch_fault(struct pt_regs *regs)
8287+{
8288+
8289+#ifdef CONFIG_PAX_EMUPLT
8290+ int err;
8291+
8292+ do { /* PaX: unpatched PLT emulation */
8293+ unsigned int bl, depwi;
8294+
8295+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
8296+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
8297+
8298+ if (err)
8299+ break;
8300+
8301+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
8302+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
8303+
8304+ err = get_user(ldw, (unsigned int *)addr);
8305+ err |= get_user(bv, (unsigned int *)(addr+4));
8306+ err |= get_user(ldw2, (unsigned int *)(addr+8));
8307+
8308+ if (err)
8309+ break;
8310+
8311+ if (ldw == 0x0E801096U &&
8312+ bv == 0xEAC0C000U &&
8313+ ldw2 == 0x0E881095U)
8314+ {
8315+ unsigned int resolver, map;
8316+
8317+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
8318+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
8319+ if (err)
8320+ break;
8321+
8322+ regs->gr[20] = instruction_pointer(regs)+8;
8323+ regs->gr[21] = map;
8324+ regs->gr[22] = resolver;
8325+ regs->iaoq[0] = resolver | 3UL;
8326+ regs->iaoq[1] = regs->iaoq[0] + 4;
8327+ return 3;
8328+ }
8329+ }
8330+ } while (0);
8331+#endif
8332+
8333+#ifdef CONFIG_PAX_EMUTRAMP
8334+
8335+#ifndef CONFIG_PAX_EMUSIGRT
8336+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
8337+ return 1;
8338+#endif
8339+
8340+ do { /* PaX: rt_sigreturn emulation */
8341+ unsigned int ldi1, ldi2, bel, nop;
8342+
8343+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
8344+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
8345+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
8346+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
8347+
8348+ if (err)
8349+ break;
8350+
8351+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
8352+ ldi2 == 0x3414015AU &&
8353+ bel == 0xE4008200U &&
8354+ nop == 0x08000240U)
8355+ {
8356+ regs->gr[25] = (ldi1 & 2) >> 1;
8357+ regs->gr[20] = __NR_rt_sigreturn;
8358+ regs->gr[31] = regs->iaoq[1] + 16;
8359+ regs->sr[0] = regs->iasq[1];
8360+ regs->iaoq[0] = 0x100UL;
8361+ regs->iaoq[1] = regs->iaoq[0] + 4;
8362+ regs->iasq[0] = regs->sr[2];
8363+ regs->iasq[1] = regs->sr[2];
8364+ return 2;
8365+ }
8366+ } while (0);
8367+#endif
8368+
8369+ return 1;
8370+}
8371+
8372+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8373+{
8374+ unsigned long i;
8375+
8376+ printk(KERN_ERR "PAX: bytes at PC: ");
8377+ for (i = 0; i < 5; i++) {
8378+ unsigned int c;
8379+ if (get_user(c, (unsigned int *)pc+i))
8380+ printk(KERN_CONT "???????? ");
8381+ else
8382+ printk(KERN_CONT "%08x ", c);
8383+ }
8384+ printk("\n");
8385+}
8386+#endif
8387+
8388 int fixup_exception(struct pt_regs *regs)
8389 {
8390 const struct exception_table_entry *fix;
8391@@ -234,8 +345,33 @@ retry:
8392
8393 good_area:
8394
8395- if ((vma->vm_flags & acc_type) != acc_type)
8396+ if ((vma->vm_flags & acc_type) != acc_type) {
8397+
8398+#ifdef CONFIG_PAX_PAGEEXEC
8399+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
8400+ (address & ~3UL) == instruction_pointer(regs))
8401+ {
8402+ up_read(&mm->mmap_sem);
8403+ switch (pax_handle_fetch_fault(regs)) {
8404+
8405+#ifdef CONFIG_PAX_EMUPLT
8406+ case 3:
8407+ return;
8408+#endif
8409+
8410+#ifdef CONFIG_PAX_EMUTRAMP
8411+ case 2:
8412+ return;
8413+#endif
8414+
8415+ }
8416+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
8417+ do_group_exit(SIGKILL);
8418+ }
8419+#endif
8420+
8421 goto bad_area;
8422+ }
8423
8424 /*
8425 * If for any reason at all we couldn't handle the fault, make
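
The EMUPLT hunk above works by fingerprinting the unpatched PLT: it reads a few instruction words at the faulting PC with get_user() and compares them against fixed opcode encodings before emulating the resolver call (a racing unmap just makes the read fail and aborts the emulation). A minimal standalone sketch of that fingerprinting step, reusing the opcode constants from the hunk but none of the parisc specifics:

    #include <stdint.h>

    /* Opcode words matched by the hunk above; treated here as opaque constants. */
    #define INSN_BL    0xEA9F1FDDu   /* first word of the trampoline signature  */
    #define INSN_DEPWI 0xD6801C1Eu   /* second word of the trampoline signature */

    /* Return nonzero when the two words at pc carry the unpatched-PLT signature. */
    static int looks_like_unpatched_plt(const uint32_t pc[2])
    {
        return pc[0] == INSN_BL && pc[1] == INSN_DEPWI;
    }
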
8426diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
8427index 80b94b0..a3274fb 100644
8428--- a/arch/powerpc/Kconfig
8429+++ b/arch/powerpc/Kconfig
8430@@ -398,6 +398,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
8431 config KEXEC
8432 bool "kexec system call"
8433 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
8434+ depends on !GRKERNSEC_KMEM
8435 help
8436 kexec is a system call that implements the ability to shutdown your
8437 current kernel, and to start another kernel. It is like a reboot
8438diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
8439index 28992d0..c797b20 100644
8440--- a/arch/powerpc/include/asm/atomic.h
8441+++ b/arch/powerpc/include/asm/atomic.h
8442@@ -519,6 +519,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
8443 return t1;
8444 }
8445
8446+#define atomic64_read_unchecked(v) atomic64_read(v)
8447+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8448+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8449+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8450+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8451+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8452+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8453+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8454+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8455+
8456 #endif /* __powerpc64__ */
8457
8458 #endif /* __KERNEL__ */
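
On architectures where the PaX REFCOUNT overflow instrumentation is not wired up, the *_unchecked atomic API still has to exist so common code compiles, and the hunk above supplies it as straight aliases. The pattern, reduced to a compilable sketch (the type and names here are illustrative, not the kernel's):

    /* Without overflow checking, the "unchecked" ops are just the plain ops. */
    typedef struct { volatile long counter; } atomic64_unchecked_t;

    #define atomic64_read_unchecked(v)   ((v)->counter)
    #define atomic64_set_unchecked(v, i) ((void)((v)->counter = (i)))
    #define atomic64_inc_unchecked(v)    ((void)((v)->counter++))
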
8459diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8460index bab79a1..4a3eabc 100644
8461--- a/arch/powerpc/include/asm/barrier.h
8462+++ b/arch/powerpc/include/asm/barrier.h
8463@@ -73,7 +73,7 @@
8464 do { \
8465 compiletime_assert_atomic_type(*p); \
8466 __lwsync(); \
8467- ACCESS_ONCE(*p) = (v); \
8468+ ACCESS_ONCE_RW(*p) = (v); \
8469 } while (0)
8470
8471 #define smp_load_acquire(p) \
8472diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8473index ed0afc1..0332825 100644
8474--- a/arch/powerpc/include/asm/cache.h
8475+++ b/arch/powerpc/include/asm/cache.h
8476@@ -3,6 +3,7 @@
8477
8478 #ifdef __KERNEL__
8479
8480+#include <linux/const.h>
8481
8482 /* bytes per L1 cache line */
8483 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8484@@ -22,7 +23,7 @@
8485 #define L1_CACHE_SHIFT 7
8486 #endif
8487
8488-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8489+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8490
8491 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8492
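
The switch from a bare 1 to _AC(1,UL) matters because cache.h is included from both C and assembly: _AC pastes the UL suffix onto the literal only in C, so L1_CACHE_BYTES is an unsigned long in address arithmetic yet still assembles. Paraphrasing the definition from include/uapi/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X             /* assemblers reject C integer suffixes */
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)    /* _AC(1,UL) -> (1UL) */
    #endif

    #define L1_CACHE_SHIFT 7
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)   /* 128UL in C, 128 in asm */
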
8493diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8494index 888d8f3..66f581c 100644
8495--- a/arch/powerpc/include/asm/elf.h
8496+++ b/arch/powerpc/include/asm/elf.h
8497@@ -28,8 +28,19 @@
8498 the loader. We need to make sure that it is out of the way of the program
8499 that it will "exec", and that there is sufficient room for the brk. */
8500
8501-extern unsigned long randomize_et_dyn(unsigned long base);
8502-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8503+#define ELF_ET_DYN_BASE (0x20000000)
8504+
8505+#ifdef CONFIG_PAX_ASLR
8506+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8507+
8508+#ifdef __powerpc64__
8509+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8510+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8511+#else
8512+#define PAX_DELTA_MMAP_LEN 15
8513+#define PAX_DELTA_STACK_LEN 15
8514+#endif
8515+#endif
8516
8517 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8518
8519@@ -129,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8520 (0x7ff >> (PAGE_SHIFT - 12)) : \
8521 (0x3ffff >> (PAGE_SHIFT - 12)))
8522
8523-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8524-#define arch_randomize_brk arch_randomize_brk
8525-
8526-
8527 #ifdef CONFIG_SPU_BASE
8528 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8529 #define NT_SPU 1
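
The PAX_DELTA_*_LEN values are bits of page-granular entropy, so the spans they buy are easy to check: with 4 KiB pages, 16 bits covers 2^16 pages = 256 MiB and 28 bits covers 1 TiB. A quick back-of-envelope, assuming PAGE_SHIFT = 12 and a 64-bit host:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page = 1UL << 12;                   /* 4 KiB pages */
        printf("%lu MiB\n", ((1UL << 16) * page) >> 20);  /* 256  */
        printf("%lu GiB\n", ((1UL << 28) * page) >> 30);  /* 1024 */
        return 0;
    }
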
8530diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8531index 8196e9c..d83a9f3 100644
8532--- a/arch/powerpc/include/asm/exec.h
8533+++ b/arch/powerpc/include/asm/exec.h
8534@@ -4,6 +4,6 @@
8535 #ifndef _ASM_POWERPC_EXEC_H
8536 #define _ASM_POWERPC_EXEC_H
8537
8538-extern unsigned long arch_align_stack(unsigned long sp);
8539+#define arch_align_stack(x) ((x) & ~0xfUL)
8540
8541 #endif /* _ASM_POWERPC_EXEC_H */
8542diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8543index 5acabbd..7ea14fa 100644
8544--- a/arch/powerpc/include/asm/kmap_types.h
8545+++ b/arch/powerpc/include/asm/kmap_types.h
8546@@ -10,7 +10,7 @@
8547 * 2 of the License, or (at your option) any later version.
8548 */
8549
8550-#define KM_TYPE_NR 16
8551+#define KM_TYPE_NR 17
8552
8553 #endif /* __KERNEL__ */
8554 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8555diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8556index b8da913..60b608a 100644
8557--- a/arch/powerpc/include/asm/local.h
8558+++ b/arch/powerpc/include/asm/local.h
8559@@ -9,15 +9,26 @@ typedef struct
8560 atomic_long_t a;
8561 } local_t;
8562
8563+typedef struct
8564+{
8565+ atomic_long_unchecked_t a;
8566+} local_unchecked_t;
8567+
8568 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8569
8570 #define local_read(l) atomic_long_read(&(l)->a)
8571+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8572 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8573+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8574
8575 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8576+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8577 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8578+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8579 #define local_inc(l) atomic_long_inc(&(l)->a)
8580+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8581 #define local_dec(l) atomic_long_dec(&(l)->a)
8582+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8583
8584 static __inline__ long local_add_return(long a, local_t *l)
8585 {
8586@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
8587
8588 return t;
8589 }
8590+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
8591
8592 #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
8593
8594@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
8595
8596 return t;
8597 }
8598+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
8599
8600 static __inline__ long local_inc_return(local_t *l)
8601 {
8602@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
8603
8604 #define local_cmpxchg(l, o, n) \
8605 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8606+#define local_cmpxchg_unchecked(l, o, n) \
8607+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8608 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8609
8610 /**
8611diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8612index 8565c25..2865190 100644
8613--- a/arch/powerpc/include/asm/mman.h
8614+++ b/arch/powerpc/include/asm/mman.h
8615@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8616 }
8617 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8618
8619-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8620+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8621 {
8622 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8623 }
8624diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8625index 32e4e21..62afb12 100644
8626--- a/arch/powerpc/include/asm/page.h
8627+++ b/arch/powerpc/include/asm/page.h
8628@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
8629 * and needs to be executable. This means the whole heap ends
8630 * up being executable.
8631 */
8632-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8633- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8634+#define VM_DATA_DEFAULT_FLAGS32 \
8635+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8636+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8637
8638 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8639 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8640@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
8641 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8642 #endif
8643
8644+#define ktla_ktva(addr) (addr)
8645+#define ktva_ktla(addr) (addr)
8646+
8647 #ifndef CONFIG_PPC_BOOK3S_64
8648 /*
8649 * Use the top bit of the higher-level page table entries to indicate whether
8650diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8651index 88693ce..ac6f9ab 100644
8652--- a/arch/powerpc/include/asm/page_64.h
8653+++ b/arch/powerpc/include/asm/page_64.h
8654@@ -153,15 +153,18 @@ do { \
8655 * stack by default, so in the absence of a PT_GNU_STACK program header
8656 * we turn execute permission off.
8657 */
8658-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8659- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8660+#define VM_STACK_DEFAULT_FLAGS32 \
8661+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8662+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8663
8664 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8665 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8666
8667+#ifndef CONFIG_PAX_PAGEEXEC
8668 #define VM_STACK_DEFAULT_FLAGS \
8669 (is_32bit_task() ? \
8670 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8671+#endif
8672
8673 #include <asm-generic/getorder.h>
8674
8675diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8676index 4b0be20..c15a27d 100644
8677--- a/arch/powerpc/include/asm/pgalloc-64.h
8678+++ b/arch/powerpc/include/asm/pgalloc-64.h
8679@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8680 #ifndef CONFIG_PPC_64K_PAGES
8681
8682 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8683+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8684
8685 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8686 {
8687@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8688 pud_set(pud, (unsigned long)pmd);
8689 }
8690
8691+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8692+{
8693+ pud_populate(mm, pud, pmd);
8694+}
8695+
8696 #define pmd_populate(mm, pmd, pte_page) \
8697 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8698 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8699@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8700 #endif
8701
8702 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8703+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8704
8705 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8706 pte_t *pte)
8707diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8708index d98c1ec..9f61569 100644
8709--- a/arch/powerpc/include/asm/pgtable.h
8710+++ b/arch/powerpc/include/asm/pgtable.h
8711@@ -2,6 +2,7 @@
8712 #define _ASM_POWERPC_PGTABLE_H
8713 #ifdef __KERNEL__
8714
8715+#include <linux/const.h>
8716 #ifndef __ASSEMBLY__
8717 #include <linux/mmdebug.h>
8718 #include <asm/processor.h> /* For TASK_SIZE */
8719diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8720index 4aad413..85d86bf 100644
8721--- a/arch/powerpc/include/asm/pte-hash32.h
8722+++ b/arch/powerpc/include/asm/pte-hash32.h
8723@@ -21,6 +21,7 @@
8724 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8725 #define _PAGE_USER 0x004 /* usermode access allowed */
8726 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8727+#define _PAGE_EXEC _PAGE_GUARDED
8728 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8729 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8730 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8731diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8732index bffd89d..a6641ed 100644
8733--- a/arch/powerpc/include/asm/reg.h
8734+++ b/arch/powerpc/include/asm/reg.h
8735@@ -251,6 +251,7 @@
8736 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8737 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8738 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8739+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8740 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8741 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8742 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8743diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8744index 5a6614a..d89995d1 100644
8745--- a/arch/powerpc/include/asm/smp.h
8746+++ b/arch/powerpc/include/asm/smp.h
8747@@ -51,7 +51,7 @@ struct smp_ops_t {
8748 int (*cpu_disable)(void);
8749 void (*cpu_die)(unsigned int nr);
8750 int (*cpu_bootable)(unsigned int nr);
8751-};
8752+} __no_const;
8753
8754 extern void smp_send_debugger_break(void);
8755 extern void start_secondary_resume(void);
8756diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8757index b034ecd..af7e31f 100644
8758--- a/arch/powerpc/include/asm/thread_info.h
8759+++ b/arch/powerpc/include/asm/thread_info.h
8760@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8761 #if defined(CONFIG_PPC64)
8762 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8763 #endif
8764+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8765+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8766
8767 /* as above, but as bit values */
8768 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8769@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8770 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8771 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8772 #define _TIF_NOHZ (1<<TIF_NOHZ)
8773+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8774 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8775 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8776- _TIF_NOHZ)
8777+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8778
8779 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8780 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
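
The comment in the hunk above is load-bearing: the powerpc syscall entry tests _TIF_SYSCALL_T_OR_A with andi., whose immediate is an unsigned 16-bit field, so every flag in that mask must sit in bits 0-15. That is why TIF_GRSEC_SETXID reuses bit 6 instead of taking the next free high bit. The constraint as a compile-time check (illustrative only):

    #define TIF_GRSEC_SETXID   6
    #define _TIF_GRSEC_SETXID  (1 << TIF_GRSEC_SETXID)

    /* andi. immediates are 16 bits; a flag above bit 15 would be silently lost */
    _Static_assert(_TIF_GRSEC_SETXID < (1 << 16), "flag must fit andi. immediate");
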
8781diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8782index 9485b43..3bd3c16 100644
8783--- a/arch/powerpc/include/asm/uaccess.h
8784+++ b/arch/powerpc/include/asm/uaccess.h
8785@@ -58,6 +58,7 @@
8786
8787 #endif
8788
8789+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8790 #define access_ok(type, addr, size) \
8791 (__chk_user_ptr(addr), \
8792 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8793@@ -318,52 +319,6 @@ do { \
8794 extern unsigned long __copy_tofrom_user(void __user *to,
8795 const void __user *from, unsigned long size);
8796
8797-#ifndef __powerpc64__
8798-
8799-static inline unsigned long copy_from_user(void *to,
8800- const void __user *from, unsigned long n)
8801-{
8802- unsigned long over;
8803-
8804- if (access_ok(VERIFY_READ, from, n))
8805- return __copy_tofrom_user((__force void __user *)to, from, n);
8806- if ((unsigned long)from < TASK_SIZE) {
8807- over = (unsigned long)from + n - TASK_SIZE;
8808- return __copy_tofrom_user((__force void __user *)to, from,
8809- n - over) + over;
8810- }
8811- return n;
8812-}
8813-
8814-static inline unsigned long copy_to_user(void __user *to,
8815- const void *from, unsigned long n)
8816-{
8817- unsigned long over;
8818-
8819- if (access_ok(VERIFY_WRITE, to, n))
8820- return __copy_tofrom_user(to, (__force void __user *)from, n);
8821- if ((unsigned long)to < TASK_SIZE) {
8822- over = (unsigned long)to + n - TASK_SIZE;
8823- return __copy_tofrom_user(to, (__force void __user *)from,
8824- n - over) + over;
8825- }
8826- return n;
8827-}
8828-
8829-#else /* __powerpc64__ */
8830-
8831-#define __copy_in_user(to, from, size) \
8832- __copy_tofrom_user((to), (from), (size))
8833-
8834-extern unsigned long copy_from_user(void *to, const void __user *from,
8835- unsigned long n);
8836-extern unsigned long copy_to_user(void __user *to, const void *from,
8837- unsigned long n);
8838-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8839- unsigned long n);
8840-
8841-#endif /* __powerpc64__ */
8842-
8843 static inline unsigned long __copy_from_user_inatomic(void *to,
8844 const void __user *from, unsigned long n)
8845 {
8846@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8847 if (ret == 0)
8848 return 0;
8849 }
8850+
8851+ if (!__builtin_constant_p(n))
8852+ check_object_size(to, n, false);
8853+
8854 return __copy_tofrom_user((__force void __user *)to, from, n);
8855 }
8856
8857@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8858 if (ret == 0)
8859 return 0;
8860 }
8861+
8862+ if (!__builtin_constant_p(n))
8863+ check_object_size(from, n, true);
8864+
8865 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8866 }
8867
8868@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8869 return __copy_to_user_inatomic(to, from, size);
8870 }
8871
8872+#ifndef __powerpc64__
8873+
8874+static inline unsigned long __must_check copy_from_user(void *to,
8875+ const void __user *from, unsigned long n)
8876+{
8877+ unsigned long over;
8878+
8879+ if ((long)n < 0)
8880+ return n;
8881+
8882+ if (access_ok(VERIFY_READ, from, n)) {
8883+ if (!__builtin_constant_p(n))
8884+ check_object_size(to, n, false);
8885+ return __copy_tofrom_user((__force void __user *)to, from, n);
8886+ }
8887+ if ((unsigned long)from < TASK_SIZE) {
8888+ over = (unsigned long)from + n - TASK_SIZE;
8889+ if (!__builtin_constant_p(n - over))
8890+ check_object_size(to, n - over, false);
8891+ return __copy_tofrom_user((__force void __user *)to, from,
8892+ n - over) + over;
8893+ }
8894+ return n;
8895+}
8896+
8897+static inline unsigned long __must_check copy_to_user(void __user *to,
8898+ const void *from, unsigned long n)
8899+{
8900+ unsigned long over;
8901+
8902+ if ((long)n < 0)
8903+ return n;
8904+
8905+ if (access_ok(VERIFY_WRITE, to, n)) {
8906+ if (!__builtin_constant_p(n))
8907+ check_object_size(from, n, true);
8908+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8909+ }
8910+ if ((unsigned long)to < TASK_SIZE) {
8911+ over = (unsigned long)to + n - TASK_SIZE;
8912+		if (!__builtin_constant_p(n - over))
8913+ check_object_size(from, n - over, true);
8914+ return __copy_tofrom_user(to, (__force void __user *)from,
8915+ n - over) + over;
8916+ }
8917+ return n;
8918+}
8919+
8920+#else /* __powerpc64__ */
8921+
8922+#define __copy_in_user(to, from, size) \
8923+ __copy_tofrom_user((to), (from), (size))
8924+
8925+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8926+{
8927+ if ((long)n < 0 || n > INT_MAX)
8928+ return n;
8929+
8930+ if (!__builtin_constant_p(n))
8931+ check_object_size(to, n, false);
8932+
8933+ if (likely(access_ok(VERIFY_READ, from, n)))
8934+ n = __copy_from_user(to, from, n);
8935+ else
8936+ memset(to, 0, n);
8937+ return n;
8938+}
8939+
8940+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8941+{
8942+ if ((long)n < 0 || n > INT_MAX)
8943+ return n;
8944+
8945+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8946+ if (!__builtin_constant_p(n))
8947+ check_object_size(from, n, true);
8948+ n = __copy_to_user(to, from, n);
8949+ }
8950+ return n;
8951+}
8952+
8953+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8954+ unsigned long n);
8955+
8956+#endif /* __powerpc64__ */
8957+
8958 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8959
8960 static inline unsigned long clear_user(void __user *addr, unsigned long size)
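
Two independent hardenings are threaded through the copy routines above: a sign-bit test that rejects lengths with the top bit set (the classic symptom of a negative ssize_t laundered into an unsigned count), and check_object_size(), the PaX USERCOPY hook that verifies the kernel buffer really spans n bytes of a single object. The first guard, reduced to a sketch with memcpy standing in for the real user copy:

    #include <string.h>

    static unsigned long copy_guarded(void *to, const void *from, unsigned long n)
    {
        if ((long)n < 0)      /* absurd length: refuse, report all bytes uncopied */
            return n;
        memcpy(to, from, n);  /* stand-in for __copy_tofrom_user() */
        return 0;
    }
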
8961diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8962index 670c312..60c2b52 100644
8963--- a/arch/powerpc/kernel/Makefile
8964+++ b/arch/powerpc/kernel/Makefile
8965@@ -27,6 +27,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8966 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8967 endif
8968
8969+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8970+
8971 obj-y := cputable.o ptrace.o syscalls.o \
8972 irq.o align.o signal_32.o pmc.o vdso.o \
8973 process.o systbl.o idle.o \
8974diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8975index bb9cac6..5181202 100644
8976--- a/arch/powerpc/kernel/exceptions-64e.S
8977+++ b/arch/powerpc/kernel/exceptions-64e.S
8978@@ -1010,6 +1010,7 @@ storage_fault_common:
8979 std r14,_DAR(r1)
8980 std r15,_DSISR(r1)
8981 addi r3,r1,STACK_FRAME_OVERHEAD
8982+ bl save_nvgprs
8983 mr r4,r14
8984 mr r5,r15
8985 ld r14,PACA_EXGEN+EX_R14(r13)
8986@@ -1018,8 +1019,7 @@ storage_fault_common:
8987 cmpdi r3,0
8988 bne- 1f
8989 b ret_from_except_lite
8990-1: bl save_nvgprs
8991- mr r5,r3
8992+1: mr r5,r3
8993 addi r3,r1,STACK_FRAME_OVERHEAD
8994 ld r4,_DAR(r1)
8995 bl bad_page_fault
8996diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8997index a7d36b1..53af150 100644
8998--- a/arch/powerpc/kernel/exceptions-64s.S
8999+++ b/arch/powerpc/kernel/exceptions-64s.S
9000@@ -1637,10 +1637,10 @@ handle_page_fault:
9001 11: ld r4,_DAR(r1)
9002 ld r5,_DSISR(r1)
9003 addi r3,r1,STACK_FRAME_OVERHEAD
9004+ bl save_nvgprs
9005 bl do_page_fault
9006 cmpdi r3,0
9007 beq+ 12f
9008- bl save_nvgprs
9009 mr r5,r3
9010 addi r3,r1,STACK_FRAME_OVERHEAD
9011 lwz r4,_DAR(r1)
9012diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
9013index 248ee7e..1eb60dd 100644
9014--- a/arch/powerpc/kernel/irq.c
9015+++ b/arch/powerpc/kernel/irq.c
9016@@ -447,6 +447,8 @@ void migrate_irqs(void)
9017 }
9018 #endif
9019
9020+extern void gr_handle_kernel_exploit(void);
9021+
9022 static inline void check_stack_overflow(void)
9023 {
9024 #ifdef CONFIG_DEBUG_STACKOVERFLOW
9025@@ -459,6 +461,7 @@ static inline void check_stack_overflow(void)
9026 printk("do_IRQ: stack overflow: %ld\n",
9027 sp - sizeof(struct thread_info));
9028 dump_stack();
9029+ gr_handle_kernel_exploit();
9030 }
9031 #endif
9032 }
9033diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
9034index 6cff040..74ac5d1b 100644
9035--- a/arch/powerpc/kernel/module_32.c
9036+++ b/arch/powerpc/kernel/module_32.c
9037@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
9038 me->arch.core_plt_section = i;
9039 }
9040 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
9041- printk("Module doesn't contain .plt or .init.plt sections.\n");
9042+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
9043 return -ENOEXEC;
9044 }
9045
9046@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
9047
9048 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
9049 /* Init, or core PLT? */
9050- if (location >= mod->module_core
9051- && location < mod->module_core + mod->core_size)
9052+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
9053+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
9054 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
9055- else
9056+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
9057+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
9058 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
9059+ else {
9060+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
9061+ return ~0UL;
9062+ }
9063
9064 /* Find this entry, or if that fails, the next avail. entry */
9065 while (entry->jump[0]) {
9066@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
9067 }
9068 #ifdef CONFIG_DYNAMIC_FTRACE
9069 module->arch.tramp =
9070- do_plt_call(module->module_core,
9071+ do_plt_call(module->module_core_rx,
9072 (unsigned long)ftrace_caller,
9073 sechdrs, module);
9074 #endif
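
Under PaX the module core is split into an RX half (code) and an RW half (data), so the single base+size containment test becomes two range checks, and an address in neither region is now reported as an error instead of being silently treated as the init section. The classification, as a sketch with field names mirroring the patch (module_core_rx/_rw, core_size_rx/_rw):

    #include <stddef.h>
    #include <stdint.h>

    struct mod_core {
        uintptr_t rx, rw;        /* region base addresses */
        size_t rx_size, rw_size; /* region lengths */
    };

    static int in_core(const struct mod_core *m, uintptr_t a)
    {
        return (a >= m->rx && a < m->rx + m->rx_size) ||
               (a >= m->rw && a < m->rw + m->rw_size);
    }
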
9075diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9076index be99774..9879c82 100644
9077--- a/arch/powerpc/kernel/process.c
9078+++ b/arch/powerpc/kernel/process.c
9079@@ -1039,8 +1039,8 @@ void show_regs(struct pt_regs * regs)
9080 * Lookup NIP late so we have the best chance of getting the
9081 * above info out without failing
9082 */
9083- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9084- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9085+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9086+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9087 #endif
9088 show_stack(current, (unsigned long *) regs->gpr[1]);
9089 if (!user_mode(regs))
9090@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9091 newsp = stack[0];
9092 ip = stack[STACK_FRAME_LR_SAVE];
9093 if (!firstframe || ip != lr) {
9094- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9095+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9096 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9097 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
9098- printk(" (%pS)",
9099+ printk(" (%pA)",
9100 (void *)current->ret_stack[curr_frame].ret);
9101 curr_frame--;
9102 }
9103@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9104 struct pt_regs *regs = (struct pt_regs *)
9105 (sp + STACK_FRAME_OVERHEAD);
9106 lr = regs->link;
9107- printk("--- Exception: %lx at %pS\n LR = %pS\n",
9108+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
9109 regs->trap, (void *)regs->nip, (void *)lr);
9110 firstframe = 1;
9111 }
9112@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
9113 mtspr(SPRN_CTRLT, ctrl);
9114 }
9115 #endif /* CONFIG_PPC64 */
9116-
9117-unsigned long arch_align_stack(unsigned long sp)
9118-{
9119- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9120- sp -= get_random_int() & ~PAGE_MASK;
9121- return sp & ~0xf;
9122-}
9123-
9124-static inline unsigned long brk_rnd(void)
9125-{
9126- unsigned long rnd = 0;
9127-
9128- /* 8MB for 32bit, 1GB for 64bit */
9129- if (is_32bit_task())
9130- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9131- else
9132- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9133-
9134- return rnd << PAGE_SHIFT;
9135-}
9136-
9137-unsigned long arch_randomize_brk(struct mm_struct *mm)
9138-{
9139- unsigned long base = mm->brk;
9140- unsigned long ret;
9141-
9142-#ifdef CONFIG_PPC_STD_MMU_64
9143- /*
9144- * If we are using 1TB segments and we are allowed to randomise
9145- * the heap, we can put it above 1TB so it is backed by a 1TB
9146- * segment. Otherwise the heap will be in the bottom 1TB
9147- * which always uses 256MB segments and this may result in a
9148- * performance penalty.
9149- */
9150- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9151- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9152-#endif
9153-
9154- ret = PAGE_ALIGN(base + brk_rnd());
9155-
9156- if (ret < mm->brk)
9157- return mm->brk;
9158-
9159- return ret;
9160-}
9161-
9162-unsigned long randomize_et_dyn(unsigned long base)
9163-{
9164- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9165-
9166- if (ret < base)
9167- return base;
9168-
9169- return ret;
9170-}
9171diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9172index 2e3d2bf..35df241 100644
9173--- a/arch/powerpc/kernel/ptrace.c
9174+++ b/arch/powerpc/kernel/ptrace.c
9175@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9176 return ret;
9177 }
9178
9179+#ifdef CONFIG_GRKERNSEC_SETXID
9180+extern void gr_delayed_cred_worker(void);
9181+#endif
9182+
9183 /*
9184 * We must return the syscall number to actually look up in the table.
9185 * This can be -1L to skip running any syscall at all.
9186@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9187
9188 secure_computing_strict(regs->gpr[0]);
9189
9190+#ifdef CONFIG_GRKERNSEC_SETXID
9191+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9192+ gr_delayed_cred_worker();
9193+#endif
9194+
9195 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9196 tracehook_report_syscall_entry(regs))
9197 /*
9198@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9199 {
9200 int step;
9201
9202+#ifdef CONFIG_GRKERNSEC_SETXID
9203+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9204+ gr_delayed_cred_worker();
9205+#endif
9206+
9207 audit_syscall_exit(regs);
9208
9209 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9210diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9211index 1bc5a17..910d3f3 100644
9212--- a/arch/powerpc/kernel/signal_32.c
9213+++ b/arch/powerpc/kernel/signal_32.c
9214@@ -1012,7 +1012,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
9215 /* Save user registers on the stack */
9216 frame = &rt_sf->uc.uc_mcontext;
9217 addr = frame;
9218- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9219+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9220 sigret = 0;
9221 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9222 } else {
9223diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9224index 97c1e4b..f427f81 100644
9225--- a/arch/powerpc/kernel/signal_64.c
9226+++ b/arch/powerpc/kernel/signal_64.c
9227@@ -755,7 +755,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
9228 current->thread.fp_state.fpscr = 0;
9229
9230 /* Set up to return from userspace. */
9231- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9232+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9233 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9234 } else {
9235 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9236diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9237index 239f1cd..5359f76 100644
9238--- a/arch/powerpc/kernel/traps.c
9239+++ b/arch/powerpc/kernel/traps.c
9240@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9241 return flags;
9242 }
9243
9244+extern void gr_handle_kernel_exploit(void);
9245+
9246 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9247 int signr)
9248 {
9249@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9250 panic("Fatal exception in interrupt");
9251 if (panic_on_oops)
9252 panic("Fatal exception");
9253+
9254+ gr_handle_kernel_exploit();
9255+
9256 do_exit(signr);
9257 }
9258
9259diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9260index ce74c33..0803371 100644
9261--- a/arch/powerpc/kernel/vdso.c
9262+++ b/arch/powerpc/kernel/vdso.c
9263@@ -35,6 +35,7 @@
9264 #include <asm/vdso.h>
9265 #include <asm/vdso_datapage.h>
9266 #include <asm/setup.h>
9267+#include <asm/mman.h>
9268
9269 #undef DEBUG
9270
9271@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9272 vdso_base = VDSO32_MBASE;
9273 #endif
9274
9275- current->mm->context.vdso_base = 0;
9276+ current->mm->context.vdso_base = ~0UL;
9277
9278 /* vDSO has a problem and was disabled, just don't "enable" it for the
9279 * process
9280@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9281 vdso_base = get_unmapped_area(NULL, vdso_base,
9282 (vdso_pages << PAGE_SHIFT) +
9283 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9284- 0, 0);
9285+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9286 if (IS_ERR_VALUE(vdso_base)) {
9287 rc = vdso_base;
9288 goto fail_mmapsem;
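
The vdso_base sentinel flips from 0 to ~0UL because, once RANDMMAP can place mappings anywhere, address 0 stops being a reliable "no vDSO" marker; the signal_32.c and signal_64.c hunks above update their trampoline checks to match. The idiom in isolation:

    #define VDSO_UNSET (~0UL)   /* 0 can be a legitimate mapping address */

    static unsigned long vdso_base = VDSO_UNSET;

    static int have_vdso(void)
    {
        return vdso_base != VDSO_UNSET;
    }
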
9289diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9290index 61c738a..b1092d6 100644
9291--- a/arch/powerpc/kvm/powerpc.c
9292+++ b/arch/powerpc/kvm/powerpc.c
9293@@ -1195,7 +1195,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9294 }
9295 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9296
9297-int kvm_arch_init(void *opaque)
9298+int kvm_arch_init(const void *opaque)
9299 {
9300 return 0;
9301 }
9302diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9303index 5eea6f3..5d10396 100644
9304--- a/arch/powerpc/lib/usercopy_64.c
9305+++ b/arch/powerpc/lib/usercopy_64.c
9306@@ -9,22 +9,6 @@
9307 #include <linux/module.h>
9308 #include <asm/uaccess.h>
9309
9310-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9311-{
9312- if (likely(access_ok(VERIFY_READ, from, n)))
9313- n = __copy_from_user(to, from, n);
9314- else
9315- memset(to, 0, n);
9316- return n;
9317-}
9318-
9319-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9320-{
9321- if (likely(access_ok(VERIFY_WRITE, to, n)))
9322- n = __copy_to_user(to, from, n);
9323- return n;
9324-}
9325-
9326 unsigned long copy_in_user(void __user *to, const void __user *from,
9327 unsigned long n)
9328 {
9329@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9330 return n;
9331 }
9332
9333-EXPORT_SYMBOL(copy_from_user);
9334-EXPORT_SYMBOL(copy_to_user);
9335 EXPORT_SYMBOL(copy_in_user);
9336
9337diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9338index 51ab9e7..7d3c78b 100644
9339--- a/arch/powerpc/mm/fault.c
9340+++ b/arch/powerpc/mm/fault.c
9341@@ -33,6 +33,10 @@
9342 #include <linux/magic.h>
9343 #include <linux/ratelimit.h>
9344 #include <linux/context_tracking.h>
9345+#include <linux/slab.h>
9346+#include <linux/pagemap.h>
9347+#include <linux/compiler.h>
9348+#include <linux/unistd.h>
9349
9350 #include <asm/firmware.h>
9351 #include <asm/page.h>
9352@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9353 }
9354 #endif
9355
9356+#ifdef CONFIG_PAX_PAGEEXEC
9357+/*
9358+ * PaX: decide what to do with offenders (regs->nip = fault address)
9359+ *
9360+ * returns 1 when task should be killed
9361+ */
9362+static int pax_handle_fetch_fault(struct pt_regs *regs)
9363+{
9364+ return 1;
9365+}
9366+
9367+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9368+{
9369+ unsigned long i;
9370+
9371+ printk(KERN_ERR "PAX: bytes at PC: ");
9372+ for (i = 0; i < 5; i++) {
9373+ unsigned int c;
9374+ if (get_user(c, (unsigned int __user *)pc+i))
9375+ printk(KERN_CONT "???????? ");
9376+ else
9377+ printk(KERN_CONT "%08x ", c);
9378+ }
9379+ printk("\n");
9380+}
9381+#endif
9382+
9383 /*
9384 * Check whether the instruction at regs->nip is a store using
9385 * an update addressing form which will update r1.
9386@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9387 * indicate errors in DSISR but can validly be set in SRR1.
9388 */
9389 if (trap == 0x400)
9390- error_code &= 0x48200000;
9391+ error_code &= 0x58200000;
9392 else
9393 is_write = error_code & DSISR_ISSTORE;
9394 #else
9395@@ -378,7 +409,7 @@ good_area:
9396 * "undefined". Of those that can be set, this is the only
9397 * one which seems bad.
9398 */
9399- if (error_code & 0x10000000)
9400+ if (error_code & DSISR_GUARDED)
9401 /* Guarded storage error. */
9402 goto bad_area;
9403 #endif /* CONFIG_8xx */
9404@@ -393,7 +424,7 @@ good_area:
9405 * processors use the same I/D cache coherency mechanism
9406 * as embedded.
9407 */
9408- if (error_code & DSISR_PROTFAULT)
9409+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9410 goto bad_area;
9411 #endif /* CONFIG_PPC_STD_MMU */
9412
9413@@ -483,6 +514,23 @@ bad_area:
9414 bad_area_nosemaphore:
9415 /* User mode accesses cause a SIGSEGV */
9416 if (user_mode(regs)) {
9417+
9418+#ifdef CONFIG_PAX_PAGEEXEC
9419+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9420+#ifdef CONFIG_PPC_STD_MMU
9421+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9422+#else
9423+ if (is_exec && regs->nip == address) {
9424+#endif
9425+ switch (pax_handle_fetch_fault(regs)) {
9426+ }
9427+
9428+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9429+ do_group_exit(SIGKILL);
9430+ }
9431+ }
9432+#endif
9433+
9434 _exception(SIGSEGV, regs, code, address);
9435 goto bail;
9436 }
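
The pte-hash32.h hunk earlier ties _PAGE_EXEC to the guarded-storage bit, so (on these MMUs, as far as the patch shows) an instruction fetch that violates PAGEEXEC surfaces as the 0x10000000 guarded-storage error; the ISI mask change simply admits that bit alongside the ones already kept: 0x48200000 | 0x10000000 = 0x58200000. A trivial check of the arithmetic:

    #include <assert.h>

    int main(void)
    {
        assert((0x48200000u | 0x10000000u) == 0x58200000u);
        return 0;
    }
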
9437diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9438index cb8bdbe..cde4bc7 100644
9439--- a/arch/powerpc/mm/mmap.c
9440+++ b/arch/powerpc/mm/mmap.c
9441@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9442 return sysctl_legacy_va_layout;
9443 }
9444
9445-static unsigned long mmap_rnd(void)
9446+static unsigned long mmap_rnd(struct mm_struct *mm)
9447 {
9448 unsigned long rnd = 0;
9449
9450+#ifdef CONFIG_PAX_RANDMMAP
9451+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9452+#endif
9453+
9454 if (current->flags & PF_RANDOMIZE) {
9455 /* 8MB for 32bit, 1GB for 64bit */
9456 if (is_32bit_task())
9457@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9458 return rnd << PAGE_SHIFT;
9459 }
9460
9461-static inline unsigned long mmap_base(void)
9462+static inline unsigned long mmap_base(struct mm_struct *mm)
9463 {
9464 unsigned long gap = rlimit(RLIMIT_STACK);
9465
9466@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9467 else if (gap > MAX_GAP)
9468 gap = MAX_GAP;
9469
9470- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9471+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9472 }
9473
9474 /*
9475@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9476 */
9477 if (mmap_is_legacy()) {
9478 mm->mmap_base = TASK_UNMAPPED_BASE;
9479+
9480+#ifdef CONFIG_PAX_RANDMMAP
9481+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9482+ mm->mmap_base += mm->delta_mmap;
9483+#endif
9484+
9485 mm->get_unmapped_area = arch_get_unmapped_area;
9486 } else {
9487- mm->mmap_base = mmap_base();
9488+ mm->mmap_base = mmap_base(mm);
9489+
9490+#ifdef CONFIG_PAX_RANDMMAP
9491+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9492+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9493+#endif
9494+
9495 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9496 }
9497 }
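
Both layouts get the same RANDMMAP treatment here: a legacy (bottom-up) base shifts up by delta_mmap, while a top-down base shifts down by delta_mmap + delta_stack so the randomized mmap and stack regions keep out of each other's way. Condensed into one function (a sketch; delta_mmap and delta_stack are per-mm random page-aligned offsets):

    static unsigned long pick_mmap_base(int legacy,
                                        unsigned long bottom_up_base,
                                        unsigned long top_down_base,
                                        unsigned long delta_mmap,
                                        unsigned long delta_stack)
    {
        return legacy ? bottom_up_base + delta_mmap
                      : top_down_base - (delta_mmap + delta_stack);
    }
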
9498diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9499index b0c75cc..ef7fb93 100644
9500--- a/arch/powerpc/mm/slice.c
9501+++ b/arch/powerpc/mm/slice.c
9502@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9503 if ((mm->task_size - len) < addr)
9504 return 0;
9505 vma = find_vma(mm, addr);
9506- return (!vma || (addr + len) <= vma->vm_start);
9507+ return check_heap_stack_gap(vma, addr, len, 0);
9508 }
9509
9510 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9511@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9512 info.align_offset = 0;
9513
9514 addr = TASK_UNMAPPED_BASE;
9515+
9516+#ifdef CONFIG_PAX_RANDMMAP
9517+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9518+ addr += mm->delta_mmap;
9519+#endif
9520+
9521 while (addr < TASK_SIZE) {
9522 info.low_limit = addr;
9523 if (!slice_scan_available(addr, available, 1, &addr))
9524@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9525 if (fixed && addr > (mm->task_size - len))
9526 return -ENOMEM;
9527
9528+#ifdef CONFIG_PAX_RANDMMAP
9529+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9530+ addr = 0;
9531+#endif
9532+
9533 /* If hint, make sure it matches our alignment restrictions */
9534 if (!fixed && addr) {
9535 addr = _ALIGN_UP(addr, 1ul << pshift);
9536diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9537index 4278acf..67fd0e6 100644
9538--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9539+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9540@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9541 }
9542
9543 static struct pci_ops scc_pciex_pci_ops = {
9544- scc_pciex_read_config,
9545- scc_pciex_write_config,
9546+ .read = scc_pciex_read_config,
9547+ .write = scc_pciex_write_config,
9548 };
9549
9550 static void pciex_clear_intr_all(unsigned int __iomem *base)
9551diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9552index 9098692..3d54cd1 100644
9553--- a/arch/powerpc/platforms/cell/spufs/file.c
9554+++ b/arch/powerpc/platforms/cell/spufs/file.c
9555@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9556 return VM_FAULT_NOPAGE;
9557 }
9558
9559-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9560+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9561 unsigned long address,
9562- void *buf, int len, int write)
9563+ void *buf, size_t len, int write)
9564 {
9565 struct spu_context *ctx = vma->vm_file->private_data;
9566 unsigned long offset = address - vma->vm_start;
9567diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9568index fa934fe..c296056 100644
9569--- a/arch/s390/include/asm/atomic.h
9570+++ b/arch/s390/include/asm/atomic.h
9571@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9572 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9573 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9574
9575+#define atomic64_read_unchecked(v) atomic64_read(v)
9576+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9577+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9578+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9579+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9580+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9581+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9582+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9583+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9584+
9585 #endif /* __ARCH_S390_ATOMIC__ */
9586diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9587index 19ff956..8d39cb1 100644
9588--- a/arch/s390/include/asm/barrier.h
9589+++ b/arch/s390/include/asm/barrier.h
9590@@ -37,7 +37,7 @@
9591 do { \
9592 compiletime_assert_atomic_type(*p); \
9593 barrier(); \
9594- ACCESS_ONCE(*p) = (v); \
9595+ ACCESS_ONCE_RW(*p) = (v); \
9596 } while (0)
9597
9598 #define smp_load_acquire(p) \
9599diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9600index 4d7ccac..d03d0ad 100644
9601--- a/arch/s390/include/asm/cache.h
9602+++ b/arch/s390/include/asm/cache.h
9603@@ -9,8 +9,10 @@
9604 #ifndef __ARCH_S390_CACHE_H
9605 #define __ARCH_S390_CACHE_H
9606
9607-#define L1_CACHE_BYTES 256
9608+#include <linux/const.h>
9609+
9610 #define L1_CACHE_SHIFT 8
9611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9612 #define NET_SKB_PAD 32
9613
9614 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9615diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9616index 78f4f87..598ce39 100644
9617--- a/arch/s390/include/asm/elf.h
9618+++ b/arch/s390/include/asm/elf.h
9619@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
9620 the loader. We need to make sure that it is out of the way of the program
9621 that it will "exec", and that there is sufficient room for the brk. */
9622
9623-extern unsigned long randomize_et_dyn(unsigned long base);
9624-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9625+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9626+
9627+#ifdef CONFIG_PAX_ASLR
9628+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9629+
9630+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9631+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9632+#endif
9633
9634 /* This yields a mask that user programs can use to figure out what
9635 instruction set this CPU supports. */
9636@@ -222,9 +228,6 @@ struct linux_binprm;
9637 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9638 int arch_setup_additional_pages(struct linux_binprm *, int);
9639
9640-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9641-#define arch_randomize_brk arch_randomize_brk
9642-
9643 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
9644
9645 #endif
9646diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9647index c4a93d6..4d2a9b4 100644
9648--- a/arch/s390/include/asm/exec.h
9649+++ b/arch/s390/include/asm/exec.h
9650@@ -7,6 +7,6 @@
9651 #ifndef __ASM_EXEC_H
9652 #define __ASM_EXEC_H
9653
9654-extern unsigned long arch_align_stack(unsigned long sp);
9655+#define arch_align_stack(x) ((x) & ~0xfUL)
9656
9657 #endif /* __ASM_EXEC_H */
9658diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9659index cd4c68e..6764641 100644
9660--- a/arch/s390/include/asm/uaccess.h
9661+++ b/arch/s390/include/asm/uaccess.h
9662@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9663 __range_ok((unsigned long)(addr), (size)); \
9664 })
9665
9666+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9667 #define access_ok(type, addr, size) __access_ok(addr, size)
9668
9669 /*
9670@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9671 copy_to_user(void __user *to, const void *from, unsigned long n)
9672 {
9673 might_fault();
9674+
9675+ if ((long)n < 0)
9676+ return n;
9677+
9678 return __copy_to_user(to, from, n);
9679 }
9680
9681@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9682 static inline unsigned long __must_check
9683 copy_from_user(void *to, const void __user *from, unsigned long n)
9684 {
9685- unsigned int sz = __compiletime_object_size(to);
9686+ size_t sz = __compiletime_object_size(to);
9687
9688 might_fault();
9689- if (unlikely(sz != -1 && sz < n)) {
9690+
9691+ if ((long)n < 0)
9692+ return n;
9693+
9694+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9695 copy_from_user_overflow();
9696 return n;
9697 }
9698diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9699index b89b591..fd9609d 100644
9700--- a/arch/s390/kernel/module.c
9701+++ b/arch/s390/kernel/module.c
9702@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9703
9704 /* Increase core size by size of got & plt and set start
9705 offsets for got and plt. */
9706- me->core_size = ALIGN(me->core_size, 4);
9707- me->arch.got_offset = me->core_size;
9708- me->core_size += me->arch.got_size;
9709- me->arch.plt_offset = me->core_size;
9710- me->core_size += me->arch.plt_size;
9711+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9712+ me->arch.got_offset = me->core_size_rw;
9713+ me->core_size_rw += me->arch.got_size;
9714+ me->arch.plt_offset = me->core_size_rx;
9715+ me->core_size_rx += me->arch.plt_size;
9716 return 0;
9717 }
9718
9719@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9720 if (info->got_initialized == 0) {
9721 Elf_Addr *gotent;
9722
9723- gotent = me->module_core + me->arch.got_offset +
9724+ gotent = me->module_core_rw + me->arch.got_offset +
9725 info->got_offset;
9726 *gotent = val;
9727 info->got_initialized = 1;
9728@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9729 rc = apply_rela_bits(loc, val, 0, 64, 0);
9730 else if (r_type == R_390_GOTENT ||
9731 r_type == R_390_GOTPLTENT) {
9732- val += (Elf_Addr) me->module_core - loc;
9733+ val += (Elf_Addr) me->module_core_rw - loc;
9734 rc = apply_rela_bits(loc, val, 1, 32, 1);
9735 }
9736 break;
9737@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9738 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9739 if (info->plt_initialized == 0) {
9740 unsigned int *ip;
9741- ip = me->module_core + me->arch.plt_offset +
9742+ ip = me->module_core_rx + me->arch.plt_offset +
9743 info->plt_offset;
9744 #ifndef CONFIG_64BIT
9745 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9746@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9747 val - loc + 0xffffUL < 0x1ffffeUL) ||
9748 (r_type == R_390_PLT32DBL &&
9749 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9750- val = (Elf_Addr) me->module_core +
9751+ val = (Elf_Addr) me->module_core_rx +
9752 me->arch.plt_offset +
9753 info->plt_offset;
9754 val += rela->r_addend - loc;
9755@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9756 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9757 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9758 val = val + rela->r_addend -
9759- ((Elf_Addr) me->module_core + me->arch.got_offset);
9760+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9761 if (r_type == R_390_GOTOFF16)
9762 rc = apply_rela_bits(loc, val, 0, 16, 0);
9763 else if (r_type == R_390_GOTOFF32)
9764@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9765 break;
9766 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9767 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9768- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9769+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9770 rela->r_addend - loc;
9771 if (r_type == R_390_GOTPC)
9772 rc = apply_rela_bits(loc, val, 1, 32, 0);
9773diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9774index 93b9ca4..4ea1454 100644
9775--- a/arch/s390/kernel/process.c
9776+++ b/arch/s390/kernel/process.c
9777@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
9778 }
9779 return 0;
9780 }
9781-
9782-unsigned long arch_align_stack(unsigned long sp)
9783-{
9784- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9785- sp -= get_random_int() & ~PAGE_MASK;
9786- return sp & ~0xf;
9787-}
9788-
9789-static inline unsigned long brk_rnd(void)
9790-{
9791- /* 8MB for 32bit, 1GB for 64bit */
9792- if (is_32bit_task())
9793- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9794- else
9795- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9796-}
9797-
9798-unsigned long arch_randomize_brk(struct mm_struct *mm)
9799-{
9800- unsigned long ret;
9801-
9802- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9803- return (ret > mm->brk) ? ret : mm->brk;
9804-}
9805-
9806-unsigned long randomize_et_dyn(unsigned long base)
9807-{
9808- unsigned long ret;
9809-
9810- if (!(current->flags & PF_RANDOMIZE))
9811- return base;
9812- ret = PAGE_ALIGN(base + brk_rnd());
9813- return (ret > base) ? ret : base;
9814-}
9815diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9816index 9b436c2..54fbf0a 100644
9817--- a/arch/s390/mm/mmap.c
9818+++ b/arch/s390/mm/mmap.c
9819@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9820 */
9821 if (mmap_is_legacy()) {
9822 mm->mmap_base = mmap_base_legacy();
9823+
9824+#ifdef CONFIG_PAX_RANDMMAP
9825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9826+ mm->mmap_base += mm->delta_mmap;
9827+#endif
9828+
9829 mm->get_unmapped_area = arch_get_unmapped_area;
9830 } else {
9831 mm->mmap_base = mmap_base();
9832+
9833+#ifdef CONFIG_PAX_RANDMMAP
9834+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9835+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9836+#endif
9837+
9838 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9839 }
9840 }
9841@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9842 */
9843 if (mmap_is_legacy()) {
9844 mm->mmap_base = mmap_base_legacy();
9845+
9846+#ifdef CONFIG_PAX_RANDMMAP
9847+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9848+ mm->mmap_base += mm->delta_mmap;
9849+#endif
9850+
9851 mm->get_unmapped_area = s390_get_unmapped_area;
9852 } else {
9853 mm->mmap_base = mmap_base();
9854+
9855+#ifdef CONFIG_PAX_RANDMMAP
9856+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9857+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9858+#endif
9859+
9860 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9861 }
9862 }
9863diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9864index ae3d59f..f65f075 100644
9865--- a/arch/score/include/asm/cache.h
9866+++ b/arch/score/include/asm/cache.h
9867@@ -1,7 +1,9 @@
9868 #ifndef _ASM_SCORE_CACHE_H
9869 #define _ASM_SCORE_CACHE_H
9870
9871+#include <linux/const.h>
9872+
9873 #define L1_CACHE_SHIFT 4
9874-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9875+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9876
9877 #endif /* _ASM_SCORE_CACHE_H */
9878diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9879index f9f3cd5..58ff438 100644
9880--- a/arch/score/include/asm/exec.h
9881+++ b/arch/score/include/asm/exec.h
9882@@ -1,6 +1,6 @@
9883 #ifndef _ASM_SCORE_EXEC_H
9884 #define _ASM_SCORE_EXEC_H
9885
9886-extern unsigned long arch_align_stack(unsigned long sp);
9887+#define arch_align_stack(x) (x)
9888
9889 #endif /* _ASM_SCORE_EXEC_H */
9890diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9891index a1519ad3..e8ac1ff 100644
9892--- a/arch/score/kernel/process.c
9893+++ b/arch/score/kernel/process.c
9894@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9895
9896 return task_pt_regs(task)->cp0_epc;
9897 }
9898-
9899-unsigned long arch_align_stack(unsigned long sp)
9900-{
9901- return sp;
9902-}
9903diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9904index ef9e555..331bd29 100644
9905--- a/arch/sh/include/asm/cache.h
9906+++ b/arch/sh/include/asm/cache.h
9907@@ -9,10 +9,11 @@
9908 #define __ASM_SH_CACHE_H
9909 #ifdef __KERNEL__
9910
9911+#include <linux/const.h>
9912 #include <linux/init.h>
9913 #include <cpu/cache.h>
9914
9915-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9916+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9917
9918 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9919
9920diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9921index 6777177..cb5e44f 100644
9922--- a/arch/sh/mm/mmap.c
9923+++ b/arch/sh/mm/mmap.c
9924@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9925 struct mm_struct *mm = current->mm;
9926 struct vm_area_struct *vma;
9927 int do_colour_align;
9928+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9929 struct vm_unmapped_area_info info;
9930
9931 if (flags & MAP_FIXED) {
9932@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9933 if (filp || (flags & MAP_SHARED))
9934 do_colour_align = 1;
9935
9936+#ifdef CONFIG_PAX_RANDMMAP
9937+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9938+#endif
9939+
9940 if (addr) {
9941 if (do_colour_align)
9942 addr = COLOUR_ALIGN(addr, pgoff);
9943@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9944 addr = PAGE_ALIGN(addr);
9945
9946 vma = find_vma(mm, addr);
9947- if (TASK_SIZE - len >= addr &&
9948- (!vma || addr + len <= vma->vm_start))
9949+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9950 return addr;
9951 }
9952
9953 info.flags = 0;
9954 info.length = len;
9955- info.low_limit = TASK_UNMAPPED_BASE;
9956+ info.low_limit = mm->mmap_base;
9957 info.high_limit = TASK_SIZE;
9958 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9959 info.align_offset = pgoff << PAGE_SHIFT;
9960@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9961 struct mm_struct *mm = current->mm;
9962 unsigned long addr = addr0;
9963 int do_colour_align;
9964+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9965 struct vm_unmapped_area_info info;
9966
9967 if (flags & MAP_FIXED) {
9968@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9969 if (filp || (flags & MAP_SHARED))
9970 do_colour_align = 1;
9971
9972+#ifdef CONFIG_PAX_RANDMMAP
9973+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9974+#endif
9975+
9976 /* requesting a specific address */
9977 if (addr) {
9978 if (do_colour_align)
9979@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9980 addr = PAGE_ALIGN(addr);
9981
9982 vma = find_vma(mm, addr);
9983- if (TASK_SIZE - len >= addr &&
9984- (!vma || addr + len <= vma->vm_start))
9985+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9986 return addr;
9987 }
9988
9989@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9990 VM_BUG_ON(addr != -ENOMEM);
9991 info.flags = 0;
9992 info.low_limit = TASK_UNMAPPED_BASE;
9993+
9994+#ifdef CONFIG_PAX_RANDMMAP
9995+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9996+ info.low_limit += mm->delta_mmap;
9997+#endif
9998+
9999 info.high_limit = TASK_SIZE;
10000 addr = vm_unmapped_area(&info);
10001 }
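
The recurring hint-validation change above swaps the stock `!vma || addr + len <= vma->vm_start` test for check_heap_stack_gap(), which also honors the gr_rand_threadstack_offset() value threaded through vm_unmapped_area_info. A rough model of the difference (the field names and the grows-down rule are assumptions about the helper's behavior, not the patch's exact implementation):

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start; bool grows_down; };

/* stock test: only require that the request end before the next VMA */
static bool plain_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
    return !next || addr + len <= next->vm_start;
}

/* hardened test: additionally keep a gap below stack-like mappings */
static bool gap_ok(const struct vma *next, unsigned long addr,
                   unsigned long len, unsigned long gap)
{
    if (!next)
        return true;
    if (next->grows_down)
        return addr + len + gap <= next->vm_start;
    return addr + len <= next->vm_start;
}

int main(void)
{
    struct vma stack = { 0x7f000000UL, true };

    printf("plain: %d\n", plain_ok(&stack, 0x7effe000UL, 0x2000));          /* 1 */
    printf("gap:   %d\n", gap_ok(&stack, 0x7effe000UL, 0x2000, 0x1000));    /* 0 */
    return 0;
}
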
10002diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
10003index bb894c8..8141d5c 100644
10004--- a/arch/sparc/include/asm/atomic_64.h
10005+++ b/arch/sparc/include/asm/atomic_64.h
10006@@ -15,18 +15,40 @@
10007 #define ATOMIC64_INIT(i) { (i) }
10008
10009 #define atomic_read(v) (*(volatile int *)&(v)->counter)
10010+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10011+{
10012+ return v->counter;
10013+}
10014 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
10015+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10016+{
10017+ return v->counter;
10018+}
10019
10020 #define atomic_set(v, i) (((v)->counter) = i)
10021+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10022+{
10023+ v->counter = i;
10024+}
10025 #define atomic64_set(v, i) (((v)->counter) = i)
10026+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10027+{
10028+ v->counter = i;
10029+}
10030
10031 void atomic_add(int, atomic_t *);
10032+void atomic_add_unchecked(int, atomic_unchecked_t *);
10033 void atomic64_add(long, atomic64_t *);
10034+void atomic64_add_unchecked(long, atomic64_unchecked_t *);
10035 void atomic_sub(int, atomic_t *);
10036+void atomic_sub_unchecked(int, atomic_unchecked_t *);
10037 void atomic64_sub(long, atomic64_t *);
10038+void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
10039
10040 int atomic_add_ret(int, atomic_t *);
10041+int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
10042 long atomic64_add_ret(long, atomic64_t *);
10043+long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
10044 int atomic_sub_ret(int, atomic_t *);
10045 long atomic64_sub_ret(long, atomic64_t *);
10046
10047@@ -34,13 +56,29 @@ long atomic64_sub_ret(long, atomic64_t *);
10048 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
10049
10050 #define atomic_inc_return(v) atomic_add_ret(1, v)
10051+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10052+{
10053+ return atomic_add_ret_unchecked(1, v);
10054+}
10055 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
10056+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10057+{
10058+ return atomic64_add_ret_unchecked(1, v);
10059+}
10060
10061 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
10062 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
10063
10064 #define atomic_add_return(i, v) atomic_add_ret(i, v)
10065+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10066+{
10067+ return atomic_add_ret_unchecked(i, v);
10068+}
10069 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
10070+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10071+{
10072+ return atomic64_add_ret_unchecked(i, v);
10073+}
10074
10075 /*
10076 * atomic_inc_and_test - increment and test
10077@@ -51,6 +89,10 @@ long atomic64_sub_ret(long, atomic64_t *);
10078 * other cases.
10079 */
10080 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10081+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10082+{
10083+ return atomic_inc_return_unchecked(v) == 0;
10084+}
10085 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10086
10087 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
10088@@ -60,25 +102,60 @@ long atomic64_sub_ret(long, atomic64_t *);
10089 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
10090
10091 #define atomic_inc(v) atomic_add(1, v)
10092+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10093+{
10094+ atomic_add_unchecked(1, v);
10095+}
10096 #define atomic64_inc(v) atomic64_add(1, v)
10097+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10098+{
10099+ atomic64_add_unchecked(1, v);
10100+}
10101
10102 #define atomic_dec(v) atomic_sub(1, v)
10103+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10104+{
10105+ atomic_sub_unchecked(1, v);
10106+}
10107 #define atomic64_dec(v) atomic64_sub(1, v)
10108+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10109+{
10110+ atomic64_sub_unchecked(1, v);
10111+}
10112
10113 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
10114 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
10115
10116 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10117+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10118+{
10119+ return cmpxchg(&v->counter, old, new);
10120+}
10121 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10122+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10123+{
10124+ return xchg(&v->counter, new);
10125+}
10126
10127 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10128 {
10129- int c, old;
10130+ int c, old, new;
10131 c = atomic_read(v);
10132 for (;;) {
10133- if (unlikely(c == (u)))
10134+ if (unlikely(c == u))
10135 break;
10136- old = atomic_cmpxchg((v), c, c + (a));
10137+
10138+ asm volatile("addcc %2, %0, %0\n"
10139+
10140+#ifdef CONFIG_PAX_REFCOUNT
10141+ "tvs %%icc, 6\n"
10142+#endif
10143+
10144+ : "=r" (new)
10145+ : "0" (c), "ir" (a)
10146+ : "cc");
10147+
10148+ old = atomic_cmpxchg(v, c, new);
10149 if (likely(old == c))
10150 break;
10151 c = old;
10152@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10153 #define atomic64_cmpxchg(v, o, n) \
10154 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10155 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10156+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10157+{
10158+ return xchg(&v->counter, new);
10159+}
10160
10161 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10162 {
10163- long c, old;
10164+ long c, old, new;
10165 c = atomic64_read(v);
10166 for (;;) {
10167- if (unlikely(c == (u)))
10168+ if (unlikely(c == u))
10169 break;
10170- old = atomic64_cmpxchg((v), c, c + (a));
10171+
10172+ asm volatile("addcc %2, %0, %0\n"
10173+
10174+#ifdef CONFIG_PAX_REFCOUNT
10175+ "tvs %%xcc, 6\n"
10176+#endif
10177+
10178+ : "=r" (new)
10179+ : "0" (c), "ir" (a)
10180+ : "cc");
10181+
10182+ old = atomic64_cmpxchg(v, c, new);
10183 if (likely(old == c))
10184 break;
10185 c = old;
10186 }
10187- return c != (u);
10188+ return c != u;
10189 }
10190
10191 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
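
The inline asm introduced above is the core of PAX_REFCOUNT on sparc64: addcc performs the addition while setting the condition codes, and tvs %icc, 6 (or %xcc for the 64-bit case) raises software trap 6 if the signed result overflowed; the *_unchecked variants keep plain wrapping semantics for counters that are allowed to wrap. A userspace model of the checked path, with __builtin_add_overflow standing in for the addcc+tvs pair:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_add(int counter, int inc)
{
    int res;

    if (__builtin_add_overflow(counter, inc, &res)) {
        fputs("refcount overflow detected\n", stderr);
        abort();                /* the kernel would enter the trap 6 handler */
    }
    return res;
}

int main(void)
{
    printf("%d\n", checked_add(41, 1));        /* fine */
    printf("%d\n", checked_add(INT_MAX, 1));   /* aborts */
    return 0;
}
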
10192diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10193index 305dcc3..7835030 100644
10194--- a/arch/sparc/include/asm/barrier_64.h
10195+++ b/arch/sparc/include/asm/barrier_64.h
10196@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10197 do { \
10198 compiletime_assert_atomic_type(*p); \
10199 barrier(); \
10200- ACCESS_ONCE(*p) = (v); \
10201+ ACCESS_ONCE_RW(*p) = (v); \
10202 } while (0)
10203
10204 #define smp_load_acquire(p) \
10205diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10206index 5bb6991..5c2132e 100644
10207--- a/arch/sparc/include/asm/cache.h
10208+++ b/arch/sparc/include/asm/cache.h
10209@@ -7,10 +7,12 @@
10210 #ifndef _SPARC_CACHE_H
10211 #define _SPARC_CACHE_H
10212
10213+#include <linux/const.h>
10214+
10215 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10216
10217 #define L1_CACHE_SHIFT 5
10218-#define L1_CACHE_BYTES 32
10219+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10220
10221 #ifdef CONFIG_SPARC32
10222 #define SMP_CACHE_BYTES_SHIFT 5
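
The three cache.h hunks so far (score, sh, sparc) replace a plain `1 << L1_CACHE_SHIFT`, which is an int, with `_AC(1,UL) << L1_CACHE_SHIFT`, an unsigned long, so size comparisons against the constant stay well-typed while assembly consumers still see a bare literal. _AC comes from linux/const.h; a standalone sketch of its effect:

#include <stdio.h>

/* mirrors include/uapi/linux/const.h */
#ifdef __ASSEMBLY__
#define _AC(X, Y) X
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
    /* (1 << 5) would be int; this is unsigned long on every target */
    printf("%lu bytes, sizeof %zu\n", L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
    return 0;
}
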
10223diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10224index a24e41f..47677ff 100644
10225--- a/arch/sparc/include/asm/elf_32.h
10226+++ b/arch/sparc/include/asm/elf_32.h
10227@@ -114,6 +114,13 @@ typedef struct {
10228
10229 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10230
10231+#ifdef CONFIG_PAX_ASLR
10232+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10233+
10234+#define PAX_DELTA_MMAP_LEN 16
10235+#define PAX_DELTA_STACK_LEN 16
10236+#endif
10237+
10238 /* This yields a mask that user programs can use to figure out what
10239 instruction set this cpu supports. This can NOT be done in userspace
10240 on Sparc. */
10241diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10242index 370ca1e..d4f4a98 100644
10243--- a/arch/sparc/include/asm/elf_64.h
10244+++ b/arch/sparc/include/asm/elf_64.h
10245@@ -189,6 +189,13 @@ typedef struct {
10246 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10247 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10248
10249+#ifdef CONFIG_PAX_ASLR
10250+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10251+
10252+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10253+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10254+#endif
10255+
10256 extern unsigned long sparc64_elf_hwcap;
10257 #define ELF_HWCAP sparc64_elf_hwcap
10258
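
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above give the number of random bits mixed into each base, so a 64-bit sparc task gets up to 28 bits of mmap randomization versus 14 for a compat task. The deltas are derived roughly as below (formula modeled on PaX; rand() replaces the kernel entropy source and is for illustration only):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

/* len_bits corresponds to PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN */
static unsigned long pax_delta(unsigned int len_bits)
{
    unsigned long r = ((unsigned long)rand() << 16) ^ (unsigned long)rand();

    return (r & ((1UL << len_bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
    printf("compat task delta_mmap: %#lx\n", pax_delta(14));
    printf("64-bit task delta_mmap: %#lx\n", pax_delta(28));
    return 0;
}
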
10259diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10260index a3890da..f6a408e 100644
10261--- a/arch/sparc/include/asm/pgalloc_32.h
10262+++ b/arch/sparc/include/asm/pgalloc_32.h
10263@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10264 }
10265
10266 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10267+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10268
10269 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10270 unsigned long address)
10271diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10272index 39a7ac4..2c9b586 100644
10273--- a/arch/sparc/include/asm/pgalloc_64.h
10274+++ b/arch/sparc/include/asm/pgalloc_64.h
10275@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
10276 }
10277
10278 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
10279+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10280
10281 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
10282 {
10283diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10284index 59ba6f6..4518128 100644
10285--- a/arch/sparc/include/asm/pgtable.h
10286+++ b/arch/sparc/include/asm/pgtable.h
10287@@ -5,4 +5,8 @@
10288 #else
10289 #include <asm/pgtable_32.h>
10290 #endif
10291+
10292+#define ktla_ktva(addr) (addr)
10293+#define ktva_ktla(addr) (addr)
10294+
10295 #endif
10296diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10297index b9b91ae..950b91e 100644
10298--- a/arch/sparc/include/asm/pgtable_32.h
10299+++ b/arch/sparc/include/asm/pgtable_32.h
10300@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10301 #define PAGE_SHARED SRMMU_PAGE_SHARED
10302 #define PAGE_COPY SRMMU_PAGE_COPY
10303 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10304+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10305+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10306+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10307 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10308
10309 /* Top-level page directory - dummy used by init-mm.
10310@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10311
10312 /* xwr */
10313 #define __P000 PAGE_NONE
10314-#define __P001 PAGE_READONLY
10315-#define __P010 PAGE_COPY
10316-#define __P011 PAGE_COPY
10317+#define __P001 PAGE_READONLY_NOEXEC
10318+#define __P010 PAGE_COPY_NOEXEC
10319+#define __P011 PAGE_COPY_NOEXEC
10320 #define __P100 PAGE_READONLY
10321 #define __P101 PAGE_READONLY
10322 #define __P110 PAGE_COPY
10323 #define __P111 PAGE_COPY
10324
10325 #define __S000 PAGE_NONE
10326-#define __S001 PAGE_READONLY
10327-#define __S010 PAGE_SHARED
10328-#define __S011 PAGE_SHARED
10329+#define __S001 PAGE_READONLY_NOEXEC
10330+#define __S010 PAGE_SHARED_NOEXEC
10331+#define __S011 PAGE_SHARED_NOEXEC
10332 #define __S100 PAGE_READONLY
10333 #define __S101 PAGE_READONLY
10334 #define __S110 PAGE_SHARED
10335diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10336index 79da178..c2eede8 100644
10337--- a/arch/sparc/include/asm/pgtsrmmu.h
10338+++ b/arch/sparc/include/asm/pgtsrmmu.h
10339@@ -115,6 +115,11 @@
10340 SRMMU_EXEC | SRMMU_REF)
10341 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10342 SRMMU_EXEC | SRMMU_REF)
10343+
10344+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10345+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10346+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10347+
10348 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10349 SRMMU_DIRTY | SRMMU_REF)
10350
10351diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10352index f5fffd8..a0669f0 100644
10353--- a/arch/sparc/include/asm/setup.h
10354+++ b/arch/sparc/include/asm/setup.h
10355@@ -53,8 +53,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10356 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10357
10358 /* init_64.c */
10359-extern atomic_t dcpage_flushes;
10360-extern atomic_t dcpage_flushes_xcall;
10361+extern atomic_unchecked_t dcpage_flushes;
10362+extern atomic_unchecked_t dcpage_flushes_xcall;
10363
10364 extern int sysctl_tsb_ratio;
10365 #endif
10366diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10367index 9689176..63c18ea 100644
10368--- a/arch/sparc/include/asm/spinlock_64.h
10369+++ b/arch/sparc/include/asm/spinlock_64.h
10370@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10371
10372 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10373
10374-static void inline arch_read_lock(arch_rwlock_t *lock)
10375+static inline void arch_read_lock(arch_rwlock_t *lock)
10376 {
10377 unsigned long tmp1, tmp2;
10378
10379 __asm__ __volatile__ (
10380 "1: ldsw [%2], %0\n"
10381 " brlz,pn %0, 2f\n"
10382-"4: add %0, 1, %1\n"
10383+"4: addcc %0, 1, %1\n"
10384+
10385+#ifdef CONFIG_PAX_REFCOUNT
10386+" tvs %%icc, 6\n"
10387+#endif
10388+
10389 " cas [%2], %0, %1\n"
10390 " cmp %0, %1\n"
10391 " bne,pn %%icc, 1b\n"
10392@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10393 " .previous"
10394 : "=&r" (tmp1), "=&r" (tmp2)
10395 : "r" (lock)
10396- : "memory");
10397+ : "memory", "cc");
10398 }
10399
10400-static int inline arch_read_trylock(arch_rwlock_t *lock)
10401+static inline int arch_read_trylock(arch_rwlock_t *lock)
10402 {
10403 int tmp1, tmp2;
10404
10405@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10406 "1: ldsw [%2], %0\n"
10407 " brlz,a,pn %0, 2f\n"
10408 " mov 0, %0\n"
10409-" add %0, 1, %1\n"
10410+" addcc %0, 1, %1\n"
10411+
10412+#ifdef CONFIG_PAX_REFCOUNT
10413+" tvs %%icc, 6\n"
10414+#endif
10415+
10416 " cas [%2], %0, %1\n"
10417 " cmp %0, %1\n"
10418 " bne,pn %%icc, 1b\n"
10419@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10420 return tmp1;
10421 }
10422
10423-static void inline arch_read_unlock(arch_rwlock_t *lock)
10424+static inline void arch_read_unlock(arch_rwlock_t *lock)
10425 {
10426 unsigned long tmp1, tmp2;
10427
10428 __asm__ __volatile__(
10429 "1: lduw [%2], %0\n"
10430-" sub %0, 1, %1\n"
10431+" subcc %0, 1, %1\n"
10432+
10433+#ifdef CONFIG_PAX_REFCOUNT
10434+" tvs %%icc, 6\n"
10435+#endif
10436+
10437 " cas [%2], %0, %1\n"
10438 " cmp %0, %1\n"
10439 " bne,pn %%xcc, 1b\n"
10440@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10441 : "memory");
10442 }
10443
10444-static void inline arch_write_lock(arch_rwlock_t *lock)
10445+static inline void arch_write_lock(arch_rwlock_t *lock)
10446 {
10447 unsigned long mask, tmp1, tmp2;
10448
10449@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10450 : "memory");
10451 }
10452
10453-static void inline arch_write_unlock(arch_rwlock_t *lock)
10454+static inline void arch_write_unlock(arch_rwlock_t *lock)
10455 {
10456 __asm__ __volatile__(
10457 " stw %%g0, [%0]"
10458@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10459 : "memory");
10460 }
10461
10462-static int inline arch_write_trylock(arch_rwlock_t *lock)
10463+static inline int arch_write_trylock(arch_rwlock_t *lock)
10464 {
10465 unsigned long mask, tmp1, tmp2, result;
10466
10467diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10468index 96efa7a..16858bf 100644
10469--- a/arch/sparc/include/asm/thread_info_32.h
10470+++ b/arch/sparc/include/asm/thread_info_32.h
10471@@ -49,6 +49,8 @@ struct thread_info {
10472 unsigned long w_saved;
10473
10474 struct restart_block restart_block;
10475+
10476+ unsigned long lowest_stack;
10477 };
10478
10479 /*
10480diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10481index a5f01ac..703b554 100644
10482--- a/arch/sparc/include/asm/thread_info_64.h
10483+++ b/arch/sparc/include/asm/thread_info_64.h
10484@@ -63,6 +63,8 @@ struct thread_info {
10485 struct pt_regs *kern_una_regs;
10486 unsigned int kern_una_insn;
10487
10488+ unsigned long lowest_stack;
10489+
10490 unsigned long fpregs[0] __attribute__ ((aligned(64)));
10491 };
10492
10493@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10494 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10495 /* flag bit 4 is available */
10496 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10497-/* flag bit 6 is available */
10498+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10499 #define TIF_32BIT 7 /* 32-bit binary */
10500 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10501 #define TIF_SECCOMP 9 /* secure computing */
10502 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10503 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10504+
10505 /* NOTE: Thread flags >= 12 should be ones we have no interest
10506 * in using in assembly, else we can't use the mask as
10507 * an immediate value in instructions such as andcc.
10508@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
10509 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10510 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10511 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10512+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10513
10514 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10515 _TIF_DO_NOTIFY_RESUME_MASK | \
10516 _TIF_NEED_RESCHED)
10517 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10518
10519+#define _TIF_WORK_SYSCALL \
10520+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10521+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10522+
10523+
10524 /*
10525 * Thread-synchronous status.
10526 *
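
The new _TIF_WORK_SYSCALL mask above bundles every flag that forces the slow syscall path, including the new TIF_GRSEC_SETXID, so the assembly sites in syscalls.S (further below) can test a single mask instead of OR-ing five bits at each andcc. A compile-time sketch using the bit numbers from this header (TIF_SYSCALL_TRACE assumed to be bit 0, as it is not shown in the hunk):

#include <stdio.h>

#define BIT(n) (1UL << (n))

#define TIF_SYSCALL_TRACE       0   /* assumed */
#define TIF_GRSEC_SETXID        6
#define TIF_NOHZ                8
#define TIF_SECCOMP             9
#define TIF_SYSCALL_AUDIT      10
#define TIF_SYSCALL_TRACEPOINT 11

#define _TIF_WORK_SYSCALL \
    (BIT(TIF_SYSCALL_TRACE) | BIT(TIF_SECCOMP) | BIT(TIF_SYSCALL_AUDIT) | \
     BIT(TIF_SYSCALL_TRACEPOINT) | BIT(TIF_NOHZ) | BIT(TIF_GRSEC_SETXID))

int main(void)
{
    unsigned long flags = BIT(TIF_GRSEC_SETXID);

    /* one andcc-style test replaces the five-way OR at every site */
    printf("slow path: %s\n", (flags & _TIF_WORK_SYSCALL) ? "yes" : "no");
    return 0;
}
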
10527diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10528index bd56c28..4b63d83 100644
10529--- a/arch/sparc/include/asm/uaccess.h
10530+++ b/arch/sparc/include/asm/uaccess.h
10531@@ -1,5 +1,6 @@
10532 #ifndef ___ASM_SPARC_UACCESS_H
10533 #define ___ASM_SPARC_UACCESS_H
10534+
10535 #if defined(__sparc__) && defined(__arch64__)
10536 #include <asm/uaccess_64.h>
10537 #else
10538diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10539index 9634d08..f55fe4f 100644
10540--- a/arch/sparc/include/asm/uaccess_32.h
10541+++ b/arch/sparc/include/asm/uaccess_32.h
10542@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10543
10544 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10545 {
10546- if (n && __access_ok((unsigned long) to, n))
10547+ if ((long)n < 0)
10548+ return n;
10549+
10550+ if (n && __access_ok((unsigned long) to, n)) {
10551+ if (!__builtin_constant_p(n))
10552+ check_object_size(from, n, true);
10553 return __copy_user(to, (__force void __user *) from, n);
10554- else
10555+ } else
10556 return n;
10557 }
10558
10559 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10560 {
10561+ if ((long)n < 0)
10562+ return n;
10563+
10564+ if (!__builtin_constant_p(n))
10565+ check_object_size(from, n, true);
10566+
10567 return __copy_user(to, (__force void __user *) from, n);
10568 }
10569
10570 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10571 {
10572- if (n && __access_ok((unsigned long) from, n))
10573+ if ((long)n < 0)
10574+ return n;
10575+
10576+ if (n && __access_ok((unsigned long) from, n)) {
10577+ if (!__builtin_constant_p(n))
10578+ check_object_size(to, n, false);
10579 return __copy_user((__force void __user *) to, from, n);
10580- else
10581+ } else
10582 return n;
10583 }
10584
10585 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10586 {
10587+ if ((long)n < 0)
10588+ return n;
10589+
10590 return __copy_user((__force void __user *) to, from, n);
10591 }
10592
10593diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10594index c990a5e..f17b9c1 100644
10595--- a/arch/sparc/include/asm/uaccess_64.h
10596+++ b/arch/sparc/include/asm/uaccess_64.h
10597@@ -10,6 +10,7 @@
10598 #include <linux/compiler.h>
10599 #include <linux/string.h>
10600 #include <linux/thread_info.h>
10601+#include <linux/kernel.h>
10602 #include <asm/asi.h>
10603 #include <asm/spitfire.h>
10604 #include <asm-generic/uaccess-unaligned.h>
10605@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10606 static inline unsigned long __must_check
10607 copy_from_user(void *to, const void __user *from, unsigned long size)
10608 {
10609- unsigned long ret = ___copy_from_user(to, from, size);
10610+ unsigned long ret;
10611
10612+ if ((long)size < 0 || size > INT_MAX)
10613+ return size;
10614+
10615+ if (!__builtin_constant_p(size))
10616+ check_object_size(to, size, false);
10617+
10618+ ret = ___copy_from_user(to, from, size);
10619 if (unlikely(ret))
10620 ret = copy_from_user_fixup(to, from, size);
10621
10622@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10623 static inline unsigned long __must_check
10624 copy_to_user(void __user *to, const void *from, unsigned long size)
10625 {
10626- unsigned long ret = ___copy_to_user(to, from, size);
10627+ unsigned long ret;
10628
10629+ if ((long)size < 0 || size > INT_MAX)
10630+ return size;
10631+
10632+ if (!__builtin_constant_p(size))
10633+ check_object_size(from, size, true);
10634+
10635+ ret = ___copy_to_user(to, from, size);
10636 if (unlikely(ret))
10637 ret = copy_to_user_fixup(to, from, size);
10638 return ret;
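
Both uaccess hunks above put the same two guards in front of the raw copy: a sign check rejecting lengths with the high bit set (a negative value cast to unsigned long, typically from a subtraction underflow; the 64-bit version also caps at INT_MAX), and a check_object_size() call for sizes that are not compile-time constants, so copies into or out of slab objects get bounds-checked. A userspace model of the control flow (bounds_ok() is a stub standing in for check_object_size()):

#include <stdio.h>
#include <string.h>

static int bounds_ok(const void *obj, unsigned long n)
{
    (void)obj; (void)n;
    return 1;   /* stub: real code validates the object's extent */
}

static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
    if ((long)n < 0)            /* underflowed length: report all bytes failed */
        return n;
    if (!__builtin_constant_p(n) && !bounds_ok(from, n))
        return n;
    memcpy(to, from, n);        /* stand-in for the raw __copy_user */
    return 0;
}

int main(void)
{
    char src[8] = "grsec", dst[8];

    printf("%lu\n", guarded_copy(dst, src, sizeof(src)));       /* 0 */
    printf("%lu\n", guarded_copy(dst, src, (unsigned long)-1)); /* rejected: returns n */
    return 0;
}
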
10639diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10640index 7cf9c6e..6206648 100644
10641--- a/arch/sparc/kernel/Makefile
10642+++ b/arch/sparc/kernel/Makefile
10643@@ -4,7 +4,7 @@
10644 #
10645
10646 asflags-y := -ansi
10647-ccflags-y := -Werror
10648+#ccflags-y := -Werror
10649
10650 extra-y := head_$(BITS).o
10651
10652diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10653index 50e7b62..79fae35 100644
10654--- a/arch/sparc/kernel/process_32.c
10655+++ b/arch/sparc/kernel/process_32.c
10656@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10657
10658 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10659 r->psr, r->pc, r->npc, r->y, print_tainted());
10660- printk("PC: <%pS>\n", (void *) r->pc);
10661+ printk("PC: <%pA>\n", (void *) r->pc);
10662 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10663 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10664 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10665 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10666 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10667 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10668- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10669+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10670
10671 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10672 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10673@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10674 rw = (struct reg_window32 *) fp;
10675 pc = rw->ins[7];
10676 printk("[%08lx : ", pc);
10677- printk("%pS ] ", (void *) pc);
10678+ printk("%pA ] ", (void *) pc);
10679 fp = rw->ins[6];
10680 } while (++count < 16);
10681 printk("\n");
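
The %pS -> %pA substitutions here, repeated throughout the sparc dump and trace paths below, route return addresses through a printk format added elsewhere in this patch; the apparent intent is that symbolized output can be censored under GRKERNSEC_HIDESYM instead of leaking raw kernel text addresses. A toy model of that idea only (not the patch's vsprintf code):

#include <stdio.h>

static int hidesym_active = 1; /* stand-in for the GRKERNSEC_HIDESYM policy */

static void print_caller(const char *sym, unsigned long addr)
{
    if (hidesym_active)
        printf("Caller: <%s>\n", sym);              /* symbol only */
    else
        printf("Caller: <%s [%#lx]>\n", sym, addr);
}

int main(void)
{
    print_caller("die_if_kernel", 0xf0123456UL);
    return 0;
}
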
10682diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10683index 027e099..6d4178f 100644
10684--- a/arch/sparc/kernel/process_64.c
10685+++ b/arch/sparc/kernel/process_64.c
10686@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10687 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10688 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10689 if (regs->tstate & TSTATE_PRIV)
10690- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10691+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10692 }
10693
10694 void show_regs(struct pt_regs *regs)
10695@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10696
10697 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10698 regs->tpc, regs->tnpc, regs->y, print_tainted());
10699- printk("TPC: <%pS>\n", (void *) regs->tpc);
10700+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10701 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10702 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10703 regs->u_regs[3]);
10704@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10705 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10706 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10707 regs->u_regs[15]);
10708- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10709+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10710 show_regwindow(regs);
10711 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10712 }
10713@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10714 ((tp && tp->task) ? tp->task->pid : -1));
10715
10716 if (gp->tstate & TSTATE_PRIV) {
10717- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10718+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10719 (void *) gp->tpc,
10720 (void *) gp->o7,
10721 (void *) gp->i7,
10722diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10723index 79cc0d1..ec62734 100644
10724--- a/arch/sparc/kernel/prom_common.c
10725+++ b/arch/sparc/kernel/prom_common.c
10726@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10727
10728 unsigned int prom_early_allocated __initdata;
10729
10730-static struct of_pdt_ops prom_sparc_ops __initdata = {
10731+static struct of_pdt_ops prom_sparc_ops __initconst = {
10732 .nextprop = prom_common_nextprop,
10733 .getproplen = prom_getproplen,
10734 .getproperty = prom_getproperty,
10735diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10736index c13c9f2..d572c34 100644
10737--- a/arch/sparc/kernel/ptrace_64.c
10738+++ b/arch/sparc/kernel/ptrace_64.c
10739@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10740 return ret;
10741 }
10742
10743+#ifdef CONFIG_GRKERNSEC_SETXID
10744+extern void gr_delayed_cred_worker(void);
10745+#endif
10746+
10747 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10748 {
10749 int ret = 0;
10750@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10751 if (test_thread_flag(TIF_NOHZ))
10752 user_exit();
10753
10754+#ifdef CONFIG_GRKERNSEC_SETXID
10755+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10756+ gr_delayed_cred_worker();
10757+#endif
10758+
10759 if (test_thread_flag(TIF_SYSCALL_TRACE))
10760 ret = tracehook_report_syscall_entry(regs);
10761
10762@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10763 if (test_thread_flag(TIF_NOHZ))
10764 user_exit();
10765
10766+#ifdef CONFIG_GRKERNSEC_SETXID
10767+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10768+ gr_delayed_cred_worker();
10769+#endif
10770+
10771 audit_syscall_exit(regs);
10772
10773 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
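
The hooks added above implement grsecurity's deferred credential update: when a thread changes IDs, its peers are flagged with TIF_GRSEC_SETXID, and each one consumes the flag (test-and-clear) at its next syscall entry or exit to apply the new credentials. A single-threaded sketch of the consume-once behavior (the flag helpers are simplified stand-ins):

#include <stdio.h>

#define TIF_GRSEC_SETXID 6

static unsigned long thread_flags;

static int test_and_clear_flag(int bit)
{
    int was = !!(thread_flags & (1UL << bit));

    thread_flags &= ~(1UL << bit);
    return was;
}

static void gr_delayed_cred_worker(void) { puts("credentials updated"); }

static void syscall_trace_enter(void)
{
    if (test_and_clear_flag(TIF_GRSEC_SETXID))
        gr_delayed_cred_worker();
}

int main(void)
{
    thread_flags |= 1UL << TIF_GRSEC_SETXID;  /* set by the setuid() paths elsewhere */
    syscall_trace_enter();                    /* consumed exactly once */
    syscall_trace_enter();                    /* no-op now */
    return 0;
}
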
10774diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10775index 41aa247..eadfb74 100644
10776--- a/arch/sparc/kernel/smp_64.c
10777+++ b/arch/sparc/kernel/smp_64.c
10778@@ -883,7 +883,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10779 return;
10780
10781 #ifdef CONFIG_DEBUG_DCFLUSH
10782- atomic_inc(&dcpage_flushes);
10783+ atomic_inc_unchecked(&dcpage_flushes);
10784 #endif
10785
10786 this_cpu = get_cpu();
10787@@ -907,7 +907,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10788 xcall_deliver(data0, __pa(pg_addr),
10789 (u64) pg_addr, cpumask_of(cpu));
10790 #ifdef CONFIG_DEBUG_DCFLUSH
10791- atomic_inc(&dcpage_flushes_xcall);
10792+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10793 #endif
10794 }
10795 }
10796@@ -926,7 +926,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10797 preempt_disable();
10798
10799 #ifdef CONFIG_DEBUG_DCFLUSH
10800- atomic_inc(&dcpage_flushes);
10801+ atomic_inc_unchecked(&dcpage_flushes);
10802 #endif
10803 data0 = 0;
10804 pg_addr = page_address(page);
10805@@ -943,7 +943,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10806 xcall_deliver(data0, __pa(pg_addr),
10807 (u64) pg_addr, cpu_online_mask);
10808 #ifdef CONFIG_DEBUG_DCFLUSH
10809- atomic_inc(&dcpage_flushes_xcall);
10810+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10811 #endif
10812 }
10813 __local_flush_dcache_page(page);
10814diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10815index 646988d..b88905f 100644
10816--- a/arch/sparc/kernel/sys_sparc_32.c
10817+++ b/arch/sparc/kernel/sys_sparc_32.c
10818@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10819 if (len > TASK_SIZE - PAGE_SIZE)
10820 return -ENOMEM;
10821 if (!addr)
10822- addr = TASK_UNMAPPED_BASE;
10823+ addr = current->mm->mmap_base;
10824
10825 info.flags = 0;
10826 info.length = len;
10827diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10828index c85403d..6af95c9 100644
10829--- a/arch/sparc/kernel/sys_sparc_64.c
10830+++ b/arch/sparc/kernel/sys_sparc_64.c
10831@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10832 struct vm_area_struct * vma;
10833 unsigned long task_size = TASK_SIZE;
10834 int do_color_align;
10835+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10836 struct vm_unmapped_area_info info;
10837
10838 if (flags & MAP_FIXED) {
10839 /* We do not accept a shared mapping if it would violate
10840 * cache aliasing constraints.
10841 */
10842- if ((flags & MAP_SHARED) &&
10843+ if ((filp || (flags & MAP_SHARED)) &&
10844 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10845 return -EINVAL;
10846 return addr;
10847@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10848 if (filp || (flags & MAP_SHARED))
10849 do_color_align = 1;
10850
10851+#ifdef CONFIG_PAX_RANDMMAP
10852+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10853+#endif
10854+
10855 if (addr) {
10856 if (do_color_align)
10857 addr = COLOR_ALIGN(addr, pgoff);
10858@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10859 addr = PAGE_ALIGN(addr);
10860
10861 vma = find_vma(mm, addr);
10862- if (task_size - len >= addr &&
10863- (!vma || addr + len <= vma->vm_start))
10864+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10865 return addr;
10866 }
10867
10868 info.flags = 0;
10869 info.length = len;
10870- info.low_limit = TASK_UNMAPPED_BASE;
10871+ info.low_limit = mm->mmap_base;
10872 info.high_limit = min(task_size, VA_EXCLUDE_START);
10873 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10874 info.align_offset = pgoff << PAGE_SHIFT;
10875+ info.threadstack_offset = offset;
10876 addr = vm_unmapped_area(&info);
10877
10878 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10879 VM_BUG_ON(addr != -ENOMEM);
10880 info.low_limit = VA_EXCLUDE_END;
10881+
10882+#ifdef CONFIG_PAX_RANDMMAP
10883+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10884+ info.low_limit += mm->delta_mmap;
10885+#endif
10886+
10887 info.high_limit = task_size;
10888 addr = vm_unmapped_area(&info);
10889 }
10890@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10891 unsigned long task_size = STACK_TOP32;
10892 unsigned long addr = addr0;
10893 int do_color_align;
10894+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10895 struct vm_unmapped_area_info info;
10896
10897 /* This should only ever run for 32-bit processes. */
10898@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10899 /* We do not accept a shared mapping if it would violate
10900 * cache aliasing constraints.
10901 */
10902- if ((flags & MAP_SHARED) &&
10903+ if ((filp || (flags & MAP_SHARED)) &&
10904 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10905 return -EINVAL;
10906 return addr;
10907@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10908 if (filp || (flags & MAP_SHARED))
10909 do_color_align = 1;
10910
10911+#ifdef CONFIG_PAX_RANDMMAP
10912+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10913+#endif
10914+
10915 /* requesting a specific address */
10916 if (addr) {
10917 if (do_color_align)
10918@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10919 addr = PAGE_ALIGN(addr);
10920
10921 vma = find_vma(mm, addr);
10922- if (task_size - len >= addr &&
10923- (!vma || addr + len <= vma->vm_start))
10924+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10925 return addr;
10926 }
10927
10928@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10929 info.high_limit = mm->mmap_base;
10930 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10931 info.align_offset = pgoff << PAGE_SHIFT;
10932+ info.threadstack_offset = offset;
10933 addr = vm_unmapped_area(&info);
10934
10935 /*
10936@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10937 VM_BUG_ON(addr != -ENOMEM);
10938 info.flags = 0;
10939 info.low_limit = TASK_UNMAPPED_BASE;
10940+
10941+#ifdef CONFIG_PAX_RANDMMAP
10942+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10943+ info.low_limit += mm->delta_mmap;
10944+#endif
10945+
10946 info.high_limit = STACK_TOP32;
10947 addr = vm_unmapped_area(&info);
10948 }
10949@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10950 EXPORT_SYMBOL(get_fb_unmapped_area);
10951
10952 /* Essentially the same as PowerPC. */
10953-static unsigned long mmap_rnd(void)
10954+static unsigned long mmap_rnd(struct mm_struct *mm)
10955 {
10956 unsigned long rnd = 0UL;
10957
10958+#ifdef CONFIG_PAX_RANDMMAP
10959+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10960+#endif
10961+
10962 if (current->flags & PF_RANDOMIZE) {
10963 unsigned long val = get_random_int();
10964 if (test_thread_flag(TIF_32BIT))
10965@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10966
10967 void arch_pick_mmap_layout(struct mm_struct *mm)
10968 {
10969- unsigned long random_factor = mmap_rnd();
10970+ unsigned long random_factor = mmap_rnd(mm);
10971 unsigned long gap;
10972
10973 /*
10974@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10975 gap == RLIM_INFINITY ||
10976 sysctl_legacy_va_layout) {
10977 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10978+
10979+#ifdef CONFIG_PAX_RANDMMAP
10980+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10981+ mm->mmap_base += mm->delta_mmap;
10982+#endif
10983+
10984 mm->get_unmapped_area = arch_get_unmapped_area;
10985 } else {
10986 /* We know it's 32-bit */
10987@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10988 gap = (task_size / 6 * 5);
10989
10990 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10991+
10992+#ifdef CONFIG_PAX_RANDMMAP
10993+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10994+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10995+#endif
10996+
10997 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10998 }
10999 }
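
Besides the RANDMMAP plumbing, the sys_sparc_64.c hunks tighten the MAP_FIXED path: the D-cache aliasing check now rejects a misaligned fixed mapping for any file-backed request (`filp ||`), not just MAP_SHARED ones. A sketch of the predicate (the SHMLBA value here is illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA (1UL << 14)  /* illustrative alias granularity */
#define EINVAL 22

static long map_fixed_check(int file_backed, int shared,
                            unsigned long addr, unsigned long pgoff)
{
    if ((file_backed || shared) &&
        ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
        return -EINVAL;
    return (long)addr;
}

int main(void)
{
    /* previously only MAP_SHARED was screened; file-backed now is too */
    printf("%ld\n", map_fixed_check(1, 0, 0x1000, 0)); /* -22 */
    printf("%ld\n", map_fixed_check(0, 0, 0x1000, 0)); /* 4096 */
    return 0;
}
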
11000diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
11001index 33a17e7..d87fb1f 100644
11002--- a/arch/sparc/kernel/syscalls.S
11003+++ b/arch/sparc/kernel/syscalls.S
11004@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
11005 #endif
11006 .align 32
11007 1: ldx [%g6 + TI_FLAGS], %l5
11008- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11009+ andcc %l5, _TIF_WORK_SYSCALL, %g0
11010 be,pt %icc, rtrap
11011 nop
11012 call syscall_trace_leave
11013@@ -184,7 +184,7 @@ linux_sparc_syscall32:
11014
11015 srl %i3, 0, %o3 ! IEU0
11016 srl %i2, 0, %o2 ! IEU0 Group
11017- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11018+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11019 bne,pn %icc, linux_syscall_trace32 ! CTI
11020 mov %i0, %l5 ! IEU1
11021 5: call %l7 ! CTI Group brk forced
11022@@ -208,7 +208,7 @@ linux_sparc_syscall:
11023
11024 mov %i3, %o3 ! IEU1
11025 mov %i4, %o4 ! IEU0 Group
11026- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11027+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11028 bne,pn %icc, linux_syscall_trace ! CTI Group
11029 mov %i0, %l5 ! IEU0
11030 2: call %l7 ! CTI Group brk forced
11031@@ -223,7 +223,7 @@ ret_sys_call:
11032
11033 cmp %o0, -ERESTART_RESTARTBLOCK
11034 bgeu,pn %xcc, 1f
11035- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11036+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11037 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
11038
11039 2:
11040diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
11041index 6fd386c5..6907d81 100644
11042--- a/arch/sparc/kernel/traps_32.c
11043+++ b/arch/sparc/kernel/traps_32.c
11044@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11045 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11046 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11047
11048+extern void gr_handle_kernel_exploit(void);
11049+
11050 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11051 {
11052 static int die_counter;
11053@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11054 count++ < 30 &&
11055 (((unsigned long) rw) >= PAGE_OFFSET) &&
11056 !(((unsigned long) rw) & 0x7)) {
11057- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11058+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11059 (void *) rw->ins[7]);
11060 rw = (struct reg_window32 *)rw->ins[6];
11061 }
11062 }
11063 printk("Instruction DUMP:");
11064 instruction_dump ((unsigned long *) regs->pc);
11065- if(regs->psr & PSR_PS)
11066+ if(regs->psr & PSR_PS) {
11067+ gr_handle_kernel_exploit();
11068 do_exit(SIGKILL);
11069+ }
11070 do_exit(SIGSEGV);
11071 }
11072
11073diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11074index fb6640e..2daada8 100644
11075--- a/arch/sparc/kernel/traps_64.c
11076+++ b/arch/sparc/kernel/traps_64.c
11077@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11078 i + 1,
11079 p->trapstack[i].tstate, p->trapstack[i].tpc,
11080 p->trapstack[i].tnpc, p->trapstack[i].tt);
11081- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11082+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11083 }
11084 }
11085
11086@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11087
11088 lvl -= 0x100;
11089 if (regs->tstate & TSTATE_PRIV) {
11090+
11091+#ifdef CONFIG_PAX_REFCOUNT
11092+ if (lvl == 6)
11093+ pax_report_refcount_overflow(regs);
11094+#endif
11095+
11096 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11097 die_if_kernel(buffer, regs);
11098 }
11099@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11100 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11101 {
11102 char buffer[32];
11103-
11104+
11105 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11106 0, lvl, SIGTRAP) == NOTIFY_STOP)
11107 return;
11108
11109+#ifdef CONFIG_PAX_REFCOUNT
11110+ if (lvl == 6)
11111+ pax_report_refcount_overflow(regs);
11112+#endif
11113+
11114 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11115
11116 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11117@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11118 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11119 printk("%s" "ERROR(%d): ",
11120 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11121- printk("TPC<%pS>\n", (void *) regs->tpc);
11122+ printk("TPC<%pA>\n", (void *) regs->tpc);
11123 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11124 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11125 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11126@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11127 smp_processor_id(),
11128 (type & 0x1) ? 'I' : 'D',
11129 regs->tpc);
11130- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11131+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11132 panic("Irrecoverable Cheetah+ parity error.");
11133 }
11134
11135@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11136 smp_processor_id(),
11137 (type & 0x1) ? 'I' : 'D',
11138 regs->tpc);
11139- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11140+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11141 }
11142
11143 struct sun4v_error_entry {
11144@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11145 /*0x38*/u64 reserved_5;
11146 };
11147
11148-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11149-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11150+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11151+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11152
11153 static const char *sun4v_err_type_to_str(u8 type)
11154 {
11155@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11156 }
11157
11158 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11159- int cpu, const char *pfx, atomic_t *ocnt)
11160+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11161 {
11162 u64 *raw_ptr = (u64 *) ent;
11163 u32 attrs;
11164@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11165
11166 show_regs(regs);
11167
11168- if ((cnt = atomic_read(ocnt)) != 0) {
11169- atomic_set(ocnt, 0);
11170+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11171+ atomic_set_unchecked(ocnt, 0);
11172 wmb();
11173 printk("%s: Queue overflowed %d times.\n",
11174 pfx, cnt);
11175@@ -2048,7 +2059,7 @@ out:
11176 */
11177 void sun4v_resum_overflow(struct pt_regs *regs)
11178 {
11179- atomic_inc(&sun4v_resum_oflow_cnt);
11180+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11181 }
11182
11183 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11184@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11185 /* XXX Actually even this can make not that much sense. Perhaps
11186 * XXX we should just pull the plug and panic directly from here?
11187 */
11188- atomic_inc(&sun4v_nonresum_oflow_cnt);
11189+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11190 }
11191
11192 unsigned long sun4v_err_itlb_vaddr;
11193@@ -2116,9 +2127,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11194
11195 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11196 regs->tpc, tl);
11197- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11198+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11199 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11200- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11201+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11202 (void *) regs->u_regs[UREG_I7]);
11203 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11204 "pte[%lx] error[%lx]\n",
11205@@ -2140,9 +2151,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11206
11207 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11208 regs->tpc, tl);
11209- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11210+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11211 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11212- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11213+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11214 (void *) regs->u_regs[UREG_I7]);
11215 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11216 "pte[%lx] error[%lx]\n",
11217@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11218 fp = (unsigned long)sf->fp + STACK_BIAS;
11219 }
11220
11221- printk(" [%016lx] %pS\n", pc, (void *) pc);
11222+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11223 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11224 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11225 int index = tsk->curr_ret_stack;
11226 if (tsk->ret_stack && index >= graph) {
11227 pc = tsk->ret_stack[index - graph].ret;
11228- printk(" [%016lx] %pS\n", pc, (void *) pc);
11229+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11230 graph++;
11231 }
11232 }
11233@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11234 return (struct reg_window *) (fp + STACK_BIAS);
11235 }
11236
11237+extern void gr_handle_kernel_exploit(void);
11238+
11239 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11240 {
11241 static int die_counter;
11242@@ -2411,7 +2424,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11243 while (rw &&
11244 count++ < 30 &&
11245 kstack_valid(tp, (unsigned long) rw)) {
11246- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11247+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11248 (void *) rw->ins[7]);
11249
11250 rw = kernel_stack_up(rw);
11251@@ -2424,8 +2437,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11252 }
11253 user_instruction_dump ((unsigned int __user *) regs->tpc);
11254 }
11255- if (regs->tstate & TSTATE_PRIV)
11256+ if (regs->tstate & TSTATE_PRIV) {
11257+ gr_handle_kernel_exploit();
11258 do_exit(SIGKILL);
11259+ }
11260 do_exit(SIGSEGV);
11261 }
11262 EXPORT_SYMBOL(die_if_kernel);
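
The traps_64.c changes close the PAX_REFCOUNT loop: software trap 6 raised by the tvs instructions arrives in bad_trap()/bad_trap_tl1() as lvl 0x106, and after the `lvl -= 0x100` adjustment the handler reports it as a refcount overflow; independently, die_if_kernel() now calls gr_handle_kernel_exploit() on privileged faults. A stubbed sketch of the dispatch (handlers are placeholders, and the die path is inlined here for brevity):

#include <stdio.h>

static void pax_report_refcount_overflow(void) { puts("PAX: refcount overflow"); }
static void gr_handle_kernel_exploit(void)     { puts("grsec: kernel exploit response"); }
static void die(const char *msg)               { printf("die_if_kernel: %s\n", msg); }

static void bad_trap(long lvl, int privileged)
{
    lvl -= 0x100;                       /* software traps start at 0x100 */
    if (privileged) {
        if (lvl == 6)                   /* raised by the tvs instructions */
            pax_report_refcount_overflow();
        gr_handle_kernel_exploit();     /* reached via the patched die path */
        die("Kernel bad sw trap");
    }
}

int main(void)
{
    bad_trap(0x106, 1);
    return 0;
}
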
11263diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11264index 62098a8..547ab2c 100644
11265--- a/arch/sparc/kernel/unaligned_64.c
11266+++ b/arch/sparc/kernel/unaligned_64.c
11267@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11268 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11269
11270 if (__ratelimit(&ratelimit)) {
11271- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11272+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11273 regs->tpc, (void *) regs->tpc);
11274 }
11275 }
11276diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11277index 3269b02..64f5231 100644
11278--- a/arch/sparc/lib/Makefile
11279+++ b/arch/sparc/lib/Makefile
11280@@ -2,7 +2,7 @@
11281 #
11282
11283 asflags-y := -ansi -DST_DIV0=0x02
11284-ccflags-y := -Werror
11285+#ccflags-y := -Werror
11286
11287 lib-$(CONFIG_SPARC32) += ashrdi3.o
11288 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11289diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11290index 85c233d..68500e0 100644
11291--- a/arch/sparc/lib/atomic_64.S
11292+++ b/arch/sparc/lib/atomic_64.S
11293@@ -17,7 +17,12 @@
11294 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11295 BACKOFF_SETUP(%o2)
11296 1: lduw [%o1], %g1
11297- add %g1, %o0, %g7
11298+ addcc %g1, %o0, %g7
11299+
11300+#ifdef CONFIG_PAX_REFCOUNT
11301+ tvs %icc, 6
11302+#endif
11303+
11304 cas [%o1], %g1, %g7
11305 cmp %g1, %g7
11306 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11307@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11308 2: BACKOFF_SPIN(%o2, %o3, 1b)
11309 ENDPROC(atomic_add)
11310
11311+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11312+ BACKOFF_SETUP(%o2)
11313+1: lduw [%o1], %g1
11314+ add %g1, %o0, %g7
11315+ cas [%o1], %g1, %g7
11316+ cmp %g1, %g7
11317+ bne,pn %icc, 2f
11318+ nop
11319+ retl
11320+ nop
11321+2: BACKOFF_SPIN(%o2, %o3, 1b)
11322+ENDPROC(atomic_add_unchecked)
11323+
11324 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11325 BACKOFF_SETUP(%o2)
11326 1: lduw [%o1], %g1
11327- sub %g1, %o0, %g7
11328+ subcc %g1, %o0, %g7
11329+
11330+#ifdef CONFIG_PAX_REFCOUNT
11331+ tvs %icc, 6
11332+#endif
11333+
11334 cas [%o1], %g1, %g7
11335 cmp %g1, %g7
11336 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11337@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11338 2: BACKOFF_SPIN(%o2, %o3, 1b)
11339 ENDPROC(atomic_sub)
11340
11341+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11342+ BACKOFF_SETUP(%o2)
11343+1: lduw [%o1], %g1
11344+ sub %g1, %o0, %g7
11345+ cas [%o1], %g1, %g7
11346+ cmp %g1, %g7
11347+ bne,pn %icc, 2f
11348+ nop
11349+ retl
11350+ nop
11351+2: BACKOFF_SPIN(%o2, %o3, 1b)
11352+ENDPROC(atomic_sub_unchecked)
11353+
11354 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11355 BACKOFF_SETUP(%o2)
11356 1: lduw [%o1], %g1
11357- add %g1, %o0, %g7
11358+ addcc %g1, %o0, %g7
11359+
11360+#ifdef CONFIG_PAX_REFCOUNT
11361+ tvs %icc, 6
11362+#endif
11363+
11364 cas [%o1], %g1, %g7
11365 cmp %g1, %g7
11366 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11367@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11368 2: BACKOFF_SPIN(%o2, %o3, 1b)
11369 ENDPROC(atomic_add_ret)
11370
11371+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11372+ BACKOFF_SETUP(%o2)
11373+1: lduw [%o1], %g1
11374+ addcc %g1, %o0, %g7
11375+ cas [%o1], %g1, %g7
11376+ cmp %g1, %g7
11377+ bne,pn %icc, 2f
11378+ add %g7, %o0, %g7
11379+ sra %g7, 0, %o0
11380+ retl
11381+ nop
11382+2: BACKOFF_SPIN(%o2, %o3, 1b)
11383+ENDPROC(atomic_add_ret_unchecked)
11384+
11385 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11386 BACKOFF_SETUP(%o2)
11387 1: lduw [%o1], %g1
11388- sub %g1, %o0, %g7
11389+ subcc %g1, %o0, %g7
11390+
11391+#ifdef CONFIG_PAX_REFCOUNT
11392+ tvs %icc, 6
11393+#endif
11394+
11395 cas [%o1], %g1, %g7
11396 cmp %g1, %g7
11397 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11398@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11399 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11400 BACKOFF_SETUP(%o2)
11401 1: ldx [%o1], %g1
11402- add %g1, %o0, %g7
11403+ addcc %g1, %o0, %g7
11404+
11405+#ifdef CONFIG_PAX_REFCOUNT
11406+ tvs %xcc, 6
11407+#endif
11408+
11409 casx [%o1], %g1, %g7
11410 cmp %g1, %g7
11411 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11412@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11413 2: BACKOFF_SPIN(%o2, %o3, 1b)
11414 ENDPROC(atomic64_add)
11415
11416+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11417+ BACKOFF_SETUP(%o2)
11418+1: ldx [%o1], %g1
11419+ addcc %g1, %o0, %g7
11420+ casx [%o1], %g1, %g7
11421+ cmp %g1, %g7
11422+ bne,pn %xcc, 2f
11423+ nop
11424+ retl
11425+ nop
11426+2: BACKOFF_SPIN(%o2, %o3, 1b)
11427+ENDPROC(atomic64_add_unchecked)
11428+
11429 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11430 BACKOFF_SETUP(%o2)
11431 1: ldx [%o1], %g1
11432- sub %g1, %o0, %g7
11433+ subcc %g1, %o0, %g7
11434+
11435+#ifdef CONFIG_PAX_REFCOUNT
11436+ tvs %xcc, 6
11437+#endif
11438+
11439 casx [%o1], %g1, %g7
11440 cmp %g1, %g7
11441 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11442@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11443 2: BACKOFF_SPIN(%o2, %o3, 1b)
11444 ENDPROC(atomic64_sub)
11445
11446+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11447+ BACKOFF_SETUP(%o2)
11448+1: ldx [%o1], %g1
11449+ subcc %g1, %o0, %g7
11450+ casx [%o1], %g1, %g7
11451+ cmp %g1, %g7
11452+ bne,pn %xcc, 2f
11453+ nop
11454+ retl
11455+ nop
11456+2: BACKOFF_SPIN(%o2, %o3, 1b)
11457+ENDPROC(atomic64_sub_unchecked)
11458+
11459 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11460 BACKOFF_SETUP(%o2)
11461 1: ldx [%o1], %g1
11462- add %g1, %o0, %g7
11463+ addcc %g1, %o0, %g7
11464+
11465+#ifdef CONFIG_PAX_REFCOUNT
11466+ tvs %xcc, 6
11467+#endif
11468+
11469 casx [%o1], %g1, %g7
11470 cmp %g1, %g7
11471 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11472@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11473 2: BACKOFF_SPIN(%o2, %o3, 1b)
11474 ENDPROC(atomic64_add_ret)
11475
11476+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11477+ BACKOFF_SETUP(%o2)
11478+1: ldx [%o1], %g1
11479+ addcc %g1, %o0, %g7
11480+ casx [%o1], %g1, %g7
11481+ cmp %g1, %g7
11482+ bne,pn %xcc, 2f
11483+ add %g7, %o0, %g7
11484+ mov %g7, %o0
11485+ retl
11486+ nop
11487+2: BACKOFF_SPIN(%o2, %o3, 1b)
11488+ENDPROC(atomic64_add_ret_unchecked)
11489+
11490 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11491 BACKOFF_SETUP(%o2)
11492 1: ldx [%o1], %g1
11493- sub %g1, %o0, %g7
11494+ subcc %g1, %o0, %g7
11495+
11496+#ifdef CONFIG_PAX_REFCOUNT
11497+ tvs %xcc, 6
11498+#endif
11499+
11500 casx [%o1], %g1, %g7
11501 cmp %g1, %g7
11502 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
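
Each ENTRY above is a load/compute/cas retry loop; the checked variants swap add/sub for addcc/subcc plus a conditional trap, while the new *_unchecked entry points preserve wrapping arithmetic for legitimate wrap-around users. The same pair expressed in portable C11 (a model, not the kernel's implementation):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static void atomic_add_checked(int inc, _Atomic int *p)
{
    int old = atomic_load(p), new;

    do {
        if (__builtin_add_overflow(old, inc, &new))
            abort();        /* the tvs %icc, 6 equivalent */
    } while (!atomic_compare_exchange_weak(p, &old, new));
}

static void atomic_add_unchecked(int inc, _Atomic int *p)
{
    int old = atomic_load(p);

    while (!atomic_compare_exchange_weak(p, &old, old + inc))
        ;                   /* plain wrapping add, no trap */
}

int main(void)
{
    _Atomic int v = 40;

    atomic_add_checked(1, &v);
    atomic_add_unchecked(1, &v);
    printf("%d\n", atomic_load(&v));    /* 42 */
    return 0;
}
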
11503diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11504index 323335b..ed85ea2 100644
11505--- a/arch/sparc/lib/ksyms.c
11506+++ b/arch/sparc/lib/ksyms.c
11507@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
11508
11509 /* Atomic counter implementation. */
11510 EXPORT_SYMBOL(atomic_add);
11511+EXPORT_SYMBOL(atomic_add_unchecked);
11512 EXPORT_SYMBOL(atomic_add_ret);
11513+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11514 EXPORT_SYMBOL(atomic_sub);
11515+EXPORT_SYMBOL(atomic_sub_unchecked);
11516 EXPORT_SYMBOL(atomic_sub_ret);
11517 EXPORT_SYMBOL(atomic64_add);
11518+EXPORT_SYMBOL(atomic64_add_unchecked);
11519 EXPORT_SYMBOL(atomic64_add_ret);
11520+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11521 EXPORT_SYMBOL(atomic64_sub);
11522+EXPORT_SYMBOL(atomic64_sub_unchecked);
11523 EXPORT_SYMBOL(atomic64_sub_ret);
11524 EXPORT_SYMBOL(atomic64_dec_if_positive);
11525
11526diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11527index 30c3ecc..736f015 100644
11528--- a/arch/sparc/mm/Makefile
11529+++ b/arch/sparc/mm/Makefile
11530@@ -2,7 +2,7 @@
11531 #
11532
11533 asflags-y := -ansi
11534-ccflags-y := -Werror
11535+#ccflags-y := -Werror
11536
11537 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11538 obj-y += fault_$(BITS).o
11539diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11540index 908e8c1..1524793 100644
11541--- a/arch/sparc/mm/fault_32.c
11542+++ b/arch/sparc/mm/fault_32.c
11543@@ -21,6 +21,9 @@
11544 #include <linux/perf_event.h>
11545 #include <linux/interrupt.h>
11546 #include <linux/kdebug.h>
11547+#include <linux/slab.h>
11548+#include <linux/pagemap.h>
11549+#include <linux/compiler.h>
11550
11551 #include <asm/page.h>
11552 #include <asm/pgtable.h>
11553@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11554 return safe_compute_effective_address(regs, insn);
11555 }
11556
11557+#ifdef CONFIG_PAX_PAGEEXEC
11558+#ifdef CONFIG_PAX_DLRESOLVE
11559+static void pax_emuplt_close(struct vm_area_struct *vma)
11560+{
11561+ vma->vm_mm->call_dl_resolve = 0UL;
11562+}
11563+
11564+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11565+{
11566+ unsigned int *kaddr;
11567+
11568+ vmf->page = alloc_page(GFP_HIGHUSER);
11569+ if (!vmf->page)
11570+ return VM_FAULT_OOM;
11571+
11572+ kaddr = kmap(vmf->page);
11573+ memset(kaddr, 0, PAGE_SIZE);
11574+ kaddr[0] = 0x9DE3BFA8U; /* save */
11575+ flush_dcache_page(vmf->page);
11576+ kunmap(vmf->page);
11577+ return VM_FAULT_MAJOR;
11578+}
11579+
11580+static const struct vm_operations_struct pax_vm_ops = {
11581+ .close = pax_emuplt_close,
11582+ .fault = pax_emuplt_fault
11583+};
11584+
11585+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11586+{
11587+ int ret;
11588+
11589+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11590+ vma->vm_mm = current->mm;
11591+ vma->vm_start = addr;
11592+ vma->vm_end = addr + PAGE_SIZE;
11593+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11594+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11595+ vma->vm_ops = &pax_vm_ops;
11596+
11597+ ret = insert_vm_struct(current->mm, vma);
11598+ if (ret)
11599+ return ret;
11600+
11601+ ++current->mm->total_vm;
11602+ return 0;
11603+}
11604+#endif
11605+
11606+/*
11607+ * PaX: decide what to do with offenders (regs->pc = fault address)
11608+ *
11609+ * returns 1 when task should be killed
11610+ * 2 when patched PLT trampoline was detected
11611+ * 3 when unpatched PLT trampoline was detected
11612+ */
11613+static int pax_handle_fetch_fault(struct pt_regs *regs)
11614+{
11615+
11616+#ifdef CONFIG_PAX_EMUPLT
11617+ int err;
11618+
11619+ do { /* PaX: patched PLT emulation #1 */
11620+ unsigned int sethi1, sethi2, jmpl;
11621+
11622+ err = get_user(sethi1, (unsigned int *)regs->pc);
11623+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11624+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11625+
11626+ if (err)
11627+ break;
11628+
11629+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11630+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11631+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11632+ {
11633+ unsigned int addr;
11634+
11635+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11636+ addr = regs->u_regs[UREG_G1];
11637+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11638+ regs->pc = addr;
11639+ regs->npc = addr+4;
11640+ return 2;
11641+ }
11642+ } while (0);
11643+
11644+ do { /* PaX: patched PLT emulation #2 */
11645+ unsigned int ba;
11646+
11647+ err = get_user(ba, (unsigned int *)regs->pc);
11648+
11649+ if (err)
11650+ break;
11651+
11652+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11653+ unsigned int addr;
11654+
11655+ if ((ba & 0xFFC00000U) == 0x30800000U)
11656+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11657+ else
11658+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11659+ regs->pc = addr;
11660+ regs->npc = addr+4;
11661+ return 2;
11662+ }
11663+ } while (0);
11664+
11665+ do { /* PaX: patched PLT emulation #3 */
11666+ unsigned int sethi, bajmpl, nop;
11667+
11668+ err = get_user(sethi, (unsigned int *)regs->pc);
11669+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11670+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11671+
11672+ if (err)
11673+ break;
11674+
11675+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11676+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11677+ nop == 0x01000000U)
11678+ {
11679+ unsigned int addr;
11680+
11681+ addr = (sethi & 0x003FFFFFU) << 10;
11682+ regs->u_regs[UREG_G1] = addr;
11683+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11684+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11685+ else
11686+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11687+ regs->pc = addr;
11688+ regs->npc = addr+4;
11689+ return 2;
11690+ }
11691+ } while (0);
11692+
11693+ do { /* PaX: unpatched PLT emulation step 1 */
11694+ unsigned int sethi, ba, nop;
11695+
11696+ err = get_user(sethi, (unsigned int *)regs->pc);
11697+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11698+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11699+
11700+ if (err)
11701+ break;
11702+
11703+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11704+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11705+ nop == 0x01000000U)
11706+ {
11707+ unsigned int addr, save, call;
11708+
11709+ if ((ba & 0xFFC00000U) == 0x30800000U)
11710+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11711+ else
11712+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11713+
11714+ err = get_user(save, (unsigned int *)addr);
11715+ err |= get_user(call, (unsigned int *)(addr+4));
11716+ err |= get_user(nop, (unsigned int *)(addr+8));
11717+ if (err)
11718+ break;
11719+
11720+#ifdef CONFIG_PAX_DLRESOLVE
11721+ if (save == 0x9DE3BFA8U &&
11722+ (call & 0xC0000000U) == 0x40000000U &&
11723+ nop == 0x01000000U)
11724+ {
11725+ struct vm_area_struct *vma;
11726+ unsigned long call_dl_resolve;
11727+
11728+ down_read(&current->mm->mmap_sem);
11729+ call_dl_resolve = current->mm->call_dl_resolve;
11730+ up_read(&current->mm->mmap_sem);
11731+ if (likely(call_dl_resolve))
11732+ goto emulate;
11733+
11734+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11735+
11736+ down_write(&current->mm->mmap_sem);
11737+ if (current->mm->call_dl_resolve) {
11738+ call_dl_resolve = current->mm->call_dl_resolve;
11739+ up_write(&current->mm->mmap_sem);
11740+ if (vma)
11741+ kmem_cache_free(vm_area_cachep, vma);
11742+ goto emulate;
11743+ }
11744+
11745+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11746+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11747+ up_write(&current->mm->mmap_sem);
11748+ if (vma)
11749+ kmem_cache_free(vm_area_cachep, vma);
11750+ return 1;
11751+ }
11752+
11753+ if (pax_insert_vma(vma, call_dl_resolve)) {
11754+ up_write(&current->mm->mmap_sem);
11755+ kmem_cache_free(vm_area_cachep, vma);
11756+ return 1;
11757+ }
11758+
11759+ current->mm->call_dl_resolve = call_dl_resolve;
11760+ up_write(&current->mm->mmap_sem);
11761+
11762+emulate:
11763+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11764+ regs->pc = call_dl_resolve;
11765+ regs->npc = addr+4;
11766+ return 3;
11767+ }
11768+#endif
11769+
11770+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11771+ if ((save & 0xFFC00000U) == 0x05000000U &&
11772+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11773+ nop == 0x01000000U)
11774+ {
11775+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11776+ regs->u_regs[UREG_G2] = addr + 4;
11777+ addr = (save & 0x003FFFFFU) << 10;
11778+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11779+ regs->pc = addr;
11780+ regs->npc = addr+4;
11781+ return 3;
11782+ }
11783+ }
11784+ } while (0);
11785+
11786+ do { /* PaX: unpatched PLT emulation step 2 */
11787+ unsigned int save, call, nop;
11788+
11789+ err = get_user(save, (unsigned int *)(regs->pc-4));
11790+ err |= get_user(call, (unsigned int *)regs->pc);
11791+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11792+ if (err)
11793+ break;
11794+
11795+ if (save == 0x9DE3BFA8U &&
11796+ (call & 0xC0000000U) == 0x40000000U &&
11797+ nop == 0x01000000U)
11798+ {
11799+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11800+
11801+ regs->u_regs[UREG_RETPC] = regs->pc;
11802+ regs->pc = dl_resolve;
11803+ regs->npc = dl_resolve+4;
11804+ return 3;
11805+ }
11806+ } while (0);
11807+#endif
11808+
11809+ return 1;
11810+}
11811+
11812+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11813+{
11814+ unsigned long i;
11815+
11816+ printk(KERN_ERR "PAX: bytes at PC: ");
11817+ for (i = 0; i < 8; i++) {
11818+ unsigned int c;
11819+ if (get_user(c, (unsigned int *)pc+i))
11820+ printk(KERN_CONT "???????? ");
11821+ else
11822+ printk(KERN_CONT "%08x ", c);
11823+ }
11824+ printk("\n");
11825+}
11826+#endif
11827+
11828 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11829 int text_fault)
11830 {
11831@@ -226,6 +500,24 @@ good_area:
11832 if (!(vma->vm_flags & VM_WRITE))
11833 goto bad_area;
11834 } else {
11835+
11836+#ifdef CONFIG_PAX_PAGEEXEC
11837+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11838+ up_read(&mm->mmap_sem);
11839+ switch (pax_handle_fetch_fault(regs)) {
11840+
11841+#ifdef CONFIG_PAX_EMUPLT
11842+ case 2:
11843+ case 3:
11844+ return;
11845+#endif
11846+
11847+ }
11848+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11849+ do_group_exit(SIGKILL);
11850+ }
11851+#endif
11852+
11853 /* Allow reads even for write-only mappings */
11854 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11855 goto bad_area;
11856diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11857index 587cd05..fbdf17a 100644
11858--- a/arch/sparc/mm/fault_64.c
11859+++ b/arch/sparc/mm/fault_64.c
11860@@ -22,6 +22,9 @@
11861 #include <linux/kdebug.h>
11862 #include <linux/percpu.h>
11863 #include <linux/context_tracking.h>
11864+#include <linux/slab.h>
11865+#include <linux/pagemap.h>
11866+#include <linux/compiler.h>
11867
11868 #include <asm/page.h>
11869 #include <asm/pgtable.h>
11870@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11871 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11872 regs->tpc);
11873 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11874- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11875+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11876 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11877 dump_stack();
11878 unhandled_fault(regs->tpc, current, regs);
11879@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11880 show_regs(regs);
11881 }
11882
11883+#ifdef CONFIG_PAX_PAGEEXEC
11884+#ifdef CONFIG_PAX_DLRESOLVE
11885+static void pax_emuplt_close(struct vm_area_struct *vma)
11886+{
11887+ vma->vm_mm->call_dl_resolve = 0UL;
11888+}
11889+
11890+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11891+{
11892+ unsigned int *kaddr;
11893+
11894+ vmf->page = alloc_page(GFP_HIGHUSER);
11895+ if (!vmf->page)
11896+ return VM_FAULT_OOM;
11897+
11898+ kaddr = kmap(vmf->page);
11899+ memset(kaddr, 0, PAGE_SIZE);
11900+ kaddr[0] = 0x9DE3BFA8U; /* save */
11901+ flush_dcache_page(vmf->page);
11902+ kunmap(vmf->page);
11903+ return VM_FAULT_MAJOR;
11904+}
11905+
11906+static const struct vm_operations_struct pax_vm_ops = {
11907+ .close = pax_emuplt_close,
11908+ .fault = pax_emuplt_fault
11909+};
11910+
11911+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11912+{
11913+ int ret;
11914+
11915+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11916+ vma->vm_mm = current->mm;
11917+ vma->vm_start = addr;
11918+ vma->vm_end = addr + PAGE_SIZE;
11919+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11920+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11921+ vma->vm_ops = &pax_vm_ops;
11922+
11923+ ret = insert_vm_struct(current->mm, vma);
11924+ if (ret)
11925+ return ret;
11926+
11927+ ++current->mm->total_vm;
11928+ return 0;
11929+}
11930+#endif
11931+
11932+/*
11933+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11934+ *
11935+ * returns 1 when task should be killed
11936+ * 2 when patched PLT trampoline was detected
11937+ * 3 when unpatched PLT trampoline was detected
11938+ */
11939+static int pax_handle_fetch_fault(struct pt_regs *regs)
11940+{
11941+
11942+#ifdef CONFIG_PAX_EMUPLT
11943+ int err;
11944+
11945+ do { /* PaX: patched PLT emulation #1 */
11946+ unsigned int sethi1, sethi2, jmpl;
11947+
11948+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11949+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11950+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11951+
11952+ if (err)
11953+ break;
11954+
11955+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11956+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11957+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11958+ {
11959+ unsigned long addr;
11960+
11961+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11962+ addr = regs->u_regs[UREG_G1];
11963+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11964+
11965+ if (test_thread_flag(TIF_32BIT))
11966+ addr &= 0xFFFFFFFFUL;
11967+
11968+ regs->tpc = addr;
11969+ regs->tnpc = addr+4;
11970+ return 2;
11971+ }
11972+ } while (0);
11973+
11974+ do { /* PaX: patched PLT emulation #2 */
11975+ unsigned int ba;
11976+
11977+ err = get_user(ba, (unsigned int *)regs->tpc);
11978+
11979+ if (err)
11980+ break;
11981+
11982+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11983+ unsigned long addr;
11984+
11985+ if ((ba & 0xFFC00000U) == 0x30800000U)
11986+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11987+ else
11988+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11989+
11990+ if (test_thread_flag(TIF_32BIT))
11991+ addr &= 0xFFFFFFFFUL;
11992+
11993+ regs->tpc = addr;
11994+ regs->tnpc = addr+4;
11995+ return 2;
11996+ }
11997+ } while (0);
11998+
11999+ do { /* PaX: patched PLT emulation #3 */
12000+ unsigned int sethi, bajmpl, nop;
12001+
12002+ err = get_user(sethi, (unsigned int *)regs->tpc);
12003+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12004+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12005+
12006+ if (err)
12007+ break;
12008+
12009+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12010+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12011+ nop == 0x01000000U)
12012+ {
12013+ unsigned long addr;
12014+
12015+ addr = (sethi & 0x003FFFFFU) << 10;
12016+ regs->u_regs[UREG_G1] = addr;
12017+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12018+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12019+ else
12020+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12021+
12022+ if (test_thread_flag(TIF_32BIT))
12023+ addr &= 0xFFFFFFFFUL;
12024+
12025+ regs->tpc = addr;
12026+ regs->tnpc = addr+4;
12027+ return 2;
12028+ }
12029+ } while (0);
12030+
12031+ do { /* PaX: patched PLT emulation #4 */
12032+ unsigned int sethi, mov1, call, mov2;
12033+
12034+ err = get_user(sethi, (unsigned int *)regs->tpc);
12035+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
12036+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
12037+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
12038+
12039+ if (err)
12040+ break;
12041+
12042+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12043+ mov1 == 0x8210000FU &&
12044+ (call & 0xC0000000U) == 0x40000000U &&
12045+ mov2 == 0x9E100001U)
12046+ {
12047+ unsigned long addr;
12048+
12049+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
12050+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12051+
12052+ if (test_thread_flag(TIF_32BIT))
12053+ addr &= 0xFFFFFFFFUL;
12054+
12055+ regs->tpc = addr;
12056+ regs->tnpc = addr+4;
12057+ return 2;
12058+ }
12059+ } while (0);
12060+
12061+ do { /* PaX: patched PLT emulation #5 */
12062+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
12063+
12064+ err = get_user(sethi, (unsigned int *)regs->tpc);
12065+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12066+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12067+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
12068+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
12069+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
12070+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
12071+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
12072+
12073+ if (err)
12074+ break;
12075+
12076+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12077+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12078+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12079+ (or1 & 0xFFFFE000U) == 0x82106000U &&
12080+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12081+ sllx == 0x83287020U &&
12082+ jmpl == 0x81C04005U &&
12083+ nop == 0x01000000U)
12084+ {
12085+ unsigned long addr;
12086+
12087+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12088+ regs->u_regs[UREG_G1] <<= 32;
12089+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12090+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12091+ regs->tpc = addr;
12092+ regs->tnpc = addr+4;
12093+ return 2;
12094+ }
12095+ } while (0);
12096+
12097+ do { /* PaX: patched PLT emulation #6 */
12098+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
12099+
12100+ err = get_user(sethi, (unsigned int *)regs->tpc);
12101+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12102+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12103+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
12104+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
12105+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
12106+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
12107+
12108+ if (err)
12109+ break;
12110+
12111+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12112+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12113+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12114+ sllx == 0x83287020U &&
12115+ (or & 0xFFFFE000U) == 0x8A116000U &&
12116+ jmpl == 0x81C04005U &&
12117+ nop == 0x01000000U)
12118+ {
12119+ unsigned long addr;
12120+
12121+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
12122+ regs->u_regs[UREG_G1] <<= 32;
12123+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
12124+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12125+ regs->tpc = addr;
12126+ regs->tnpc = addr+4;
12127+ return 2;
12128+ }
12129+ } while (0);
12130+
12131+ do { /* PaX: unpatched PLT emulation step 1 */
12132+ unsigned int sethi, ba, nop;
12133+
12134+ err = get_user(sethi, (unsigned int *)regs->tpc);
12135+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12136+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12137+
12138+ if (err)
12139+ break;
12140+
12141+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12142+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12143+ nop == 0x01000000U)
12144+ {
12145+ unsigned long addr;
12146+ unsigned int save, call;
12147+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12148+
12149+ if ((ba & 0xFFC00000U) == 0x30800000U)
12150+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12151+ else
12152+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12153+
12154+ if (test_thread_flag(TIF_32BIT))
12155+ addr &= 0xFFFFFFFFUL;
12156+
12157+ err = get_user(save, (unsigned int *)addr);
12158+ err |= get_user(call, (unsigned int *)(addr+4));
12159+ err |= get_user(nop, (unsigned int *)(addr+8));
12160+ if (err)
12161+ break;
12162+
12163+#ifdef CONFIG_PAX_DLRESOLVE
12164+ if (save == 0x9DE3BFA8U &&
12165+ (call & 0xC0000000U) == 0x40000000U &&
12166+ nop == 0x01000000U)
12167+ {
12168+ struct vm_area_struct *vma;
12169+ unsigned long call_dl_resolve;
12170+
12171+ down_read(&current->mm->mmap_sem);
12172+ call_dl_resolve = current->mm->call_dl_resolve;
12173+ up_read(&current->mm->mmap_sem);
12174+ if (likely(call_dl_resolve))
12175+ goto emulate;
12176+
12177+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12178+
12179+ down_write(&current->mm->mmap_sem);
12180+ if (current->mm->call_dl_resolve) {
12181+ call_dl_resolve = current->mm->call_dl_resolve;
12182+ up_write(&current->mm->mmap_sem);
12183+ if (vma)
12184+ kmem_cache_free(vm_area_cachep, vma);
12185+ goto emulate;
12186+ }
12187+
12188+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12189+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12190+ up_write(&current->mm->mmap_sem);
12191+ if (vma)
12192+ kmem_cache_free(vm_area_cachep, vma);
12193+ return 1;
12194+ }
12195+
12196+ if (pax_insert_vma(vma, call_dl_resolve)) {
12197+ up_write(&current->mm->mmap_sem);
12198+ kmem_cache_free(vm_area_cachep, vma);
12199+ return 1;
12200+ }
12201+
12202+ current->mm->call_dl_resolve = call_dl_resolve;
12203+ up_write(&current->mm->mmap_sem);
12204+
12205+emulate:
12206+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12207+ regs->tpc = call_dl_resolve;
12208+ regs->tnpc = addr+4;
12209+ return 3;
12210+ }
12211+#endif
12212+
12213+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12214+ if ((save & 0xFFC00000U) == 0x05000000U &&
12215+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12216+ nop == 0x01000000U)
12217+ {
12218+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12219+ regs->u_regs[UREG_G2] = addr + 4;
12220+ addr = (save & 0x003FFFFFU) << 10;
12221+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12222+
12223+ if (test_thread_flag(TIF_32BIT))
12224+ addr &= 0xFFFFFFFFUL;
12225+
12226+ regs->tpc = addr;
12227+ regs->tnpc = addr+4;
12228+ return 3;
12229+ }
12230+
12231+ /* PaX: 64-bit PLT stub */
12232+ err = get_user(sethi1, (unsigned int *)addr);
12233+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12234+ err |= get_user(or1, (unsigned int *)(addr+8));
12235+ err |= get_user(or2, (unsigned int *)(addr+12));
12236+ err |= get_user(sllx, (unsigned int *)(addr+16));
12237+ err |= get_user(add, (unsigned int *)(addr+20));
12238+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12239+ err |= get_user(nop, (unsigned int *)(addr+28));
12240+ if (err)
12241+ break;
12242+
12243+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12244+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12245+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12246+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12247+ sllx == 0x89293020U &&
12248+ add == 0x8A010005U &&
12249+ jmpl == 0x89C14000U &&
12250+ nop == 0x01000000U)
12251+ {
12252+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12253+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12254+ regs->u_regs[UREG_G4] <<= 32;
12255+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12256+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12257+ regs->u_regs[UREG_G4] = addr + 24;
12258+ addr = regs->u_regs[UREG_G5];
12259+ regs->tpc = addr;
12260+ regs->tnpc = addr+4;
12261+ return 3;
12262+ }
12263+ }
12264+ } while (0);
12265+
12266+#ifdef CONFIG_PAX_DLRESOLVE
12267+ do { /* PaX: unpatched PLT emulation step 2 */
12268+ unsigned int save, call, nop;
12269+
12270+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12271+ err |= get_user(call, (unsigned int *)regs->tpc);
12272+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12273+ if (err)
12274+ break;
12275+
12276+ if (save == 0x9DE3BFA8U &&
12277+ (call & 0xC0000000U) == 0x40000000U &&
12278+ nop == 0x01000000U)
12279+ {
12280+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12281+
12282+ if (test_thread_flag(TIF_32BIT))
12283+ dl_resolve &= 0xFFFFFFFFUL;
12284+
12285+ regs->u_regs[UREG_RETPC] = regs->tpc;
12286+ regs->tpc = dl_resolve;
12287+ regs->tnpc = dl_resolve+4;
12288+ return 3;
12289+ }
12290+ } while (0);
12291+#endif
12292+
12293+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12294+ unsigned int sethi, ba, nop;
12295+
12296+ err = get_user(sethi, (unsigned int *)regs->tpc);
12297+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12298+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12299+
12300+ if (err)
12301+ break;
12302+
12303+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12304+ (ba & 0xFFF00000U) == 0x30600000U &&
12305+ nop == 0x01000000U)
12306+ {
12307+ unsigned long addr;
12308+
12309+ addr = (sethi & 0x003FFFFFU) << 10;
12310+ regs->u_regs[UREG_G1] = addr;
12311+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12312+
12313+ if (test_thread_flag(TIF_32BIT))
12314+ addr &= 0xFFFFFFFFUL;
12315+
12316+ regs->tpc = addr;
12317+ regs->tnpc = addr+4;
12318+ return 2;
12319+ }
12320+ } while (0);
12321+
12322+#endif
12323+
12324+ return 1;
12325+}
12326+
12327+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12328+{
12329+ unsigned long i;
12330+
12331+ printk(KERN_ERR "PAX: bytes at PC: ");
12332+ for (i = 0; i < 8; i++) {
12333+ unsigned int c;
12334+ if (get_user(c, (unsigned int *)pc+i))
12335+ printk(KERN_CONT "???????? ");
12336+ else
12337+ printk(KERN_CONT "%08x ", c);
12338+ }
12339+ printk("\n");
12340+}
12341+#endif
12342+
12343 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12344 {
12345 enum ctx_state prev_state = exception_enter();
12346@@ -350,6 +813,29 @@ retry:
12347 if (!vma)
12348 goto bad_area;
12349
12350+#ifdef CONFIG_PAX_PAGEEXEC
12351+ /* PaX: detect ITLB misses on non-exec pages */
12352+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12353+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12354+ {
12355+ if (address != regs->tpc)
12356+ goto good_area;
12357+
12358+ up_read(&mm->mmap_sem);
12359+ switch (pax_handle_fetch_fault(regs)) {
12360+
12361+#ifdef CONFIG_PAX_EMUPLT
12362+ case 2:
12363+ case 3:
12364+ return;
12365+#endif
12366+
12367+ }
12368+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12369+ do_group_exit(SIGKILL);
12370+ }
12371+#endif
12372+
12373 /* Pure DTLB misses do not tell us whether the fault causing
12374 * load/store/atomic was a write or not, it only says that there
12375 * was no match. So in such a case we (carefully) read the
12376diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12377index d329537..2c3746a 100644
12378--- a/arch/sparc/mm/hugetlbpage.c
12379+++ b/arch/sparc/mm/hugetlbpage.c
12380@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12381 unsigned long addr,
12382 unsigned long len,
12383 unsigned long pgoff,
12384- unsigned long flags)
12385+ unsigned long flags,
12386+ unsigned long offset)
12387 {
12388+ struct mm_struct *mm = current->mm;
12389 unsigned long task_size = TASK_SIZE;
12390 struct vm_unmapped_area_info info;
12391
12392@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12393
12394 info.flags = 0;
12395 info.length = len;
12396- info.low_limit = TASK_UNMAPPED_BASE;
12397+ info.low_limit = mm->mmap_base;
12398 info.high_limit = min(task_size, VA_EXCLUDE_START);
12399 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12400 info.align_offset = 0;
12401+ info.threadstack_offset = offset;
12402 addr = vm_unmapped_area(&info);
12403
12404 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12405 VM_BUG_ON(addr != -ENOMEM);
12406 info.low_limit = VA_EXCLUDE_END;
12407+
12408+#ifdef CONFIG_PAX_RANDMMAP
12409+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12410+ info.low_limit += mm->delta_mmap;
12411+#endif
12412+
12413 info.high_limit = task_size;
12414 addr = vm_unmapped_area(&info);
12415 }
12416@@ -55,7 +64,8 @@ static unsigned long
12417 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12418 const unsigned long len,
12419 const unsigned long pgoff,
12420- const unsigned long flags)
12421+ const unsigned long flags,
12422+ const unsigned long offset)
12423 {
12424 struct mm_struct *mm = current->mm;
12425 unsigned long addr = addr0;
12426@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12427 info.high_limit = mm->mmap_base;
12428 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12429 info.align_offset = 0;
12430+ info.threadstack_offset = offset;
12431 addr = vm_unmapped_area(&info);
12432
12433 /*
12434@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12435 VM_BUG_ON(addr != -ENOMEM);
12436 info.flags = 0;
12437 info.low_limit = TASK_UNMAPPED_BASE;
12438+
12439+#ifdef CONFIG_PAX_RANDMMAP
12440+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12441+ info.low_limit += mm->delta_mmap;
12442+#endif
12443+
12444 info.high_limit = STACK_TOP32;
12445 addr = vm_unmapped_area(&info);
12446 }
12447@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12448 struct mm_struct *mm = current->mm;
12449 struct vm_area_struct *vma;
12450 unsigned long task_size = TASK_SIZE;
12451+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12452
12453 if (test_thread_flag(TIF_32BIT))
12454 task_size = STACK_TOP32;
12455@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12456 return addr;
12457 }
12458
12459+#ifdef CONFIG_PAX_RANDMMAP
12460+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12461+#endif
12462+
12463 if (addr) {
12464 addr = ALIGN(addr, HPAGE_SIZE);
12465 vma = find_vma(mm, addr);
12466- if (task_size - len >= addr &&
12467- (!vma || addr + len <= vma->vm_start))
12468+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12469 return addr;
12470 }
12471 if (mm->get_unmapped_area == arch_get_unmapped_area)
12472 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12473- pgoff, flags);
12474+ pgoff, flags, offset);
12475 else
12476 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12477- pgoff, flags);
12478+ pgoff, flags, offset);
12479 }
12480
12481 pte_t *huge_pte_alloc(struct mm_struct *mm,
12482diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12483index 2cfb0f2..e917d9f 100644
12484--- a/arch/sparc/mm/init_64.c
12485+++ b/arch/sparc/mm/init_64.c
12486@@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12487 int num_kernel_image_mappings;
12488
12489 #ifdef CONFIG_DEBUG_DCFLUSH
12490-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12491+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12492 #ifdef CONFIG_SMP
12493-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12494+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12495 #endif
12496 #endif
12497
12498@@ -199,7 +199,7 @@ inline void flush_dcache_page_impl(struct page *page)
12499 {
12500 BUG_ON(tlb_type == hypervisor);
12501 #ifdef CONFIG_DEBUG_DCFLUSH
12502- atomic_inc(&dcpage_flushes);
12503+ atomic_inc_unchecked(&dcpage_flushes);
12504 #endif
12505
12506 #ifdef DCACHE_ALIASING_POSSIBLE
12507@@ -471,10 +471,10 @@ void mmu_info(struct seq_file *m)
12508
12509 #ifdef CONFIG_DEBUG_DCFLUSH
12510 seq_printf(m, "DCPageFlushes\t: %d\n",
12511- atomic_read(&dcpage_flushes));
12512+ atomic_read_unchecked(&dcpage_flushes));
12513 #ifdef CONFIG_SMP
12514 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12515- atomic_read(&dcpage_flushes_xcall));
12516+ atomic_read_unchecked(&dcpage_flushes_xcall));
12517 #endif /* CONFIG_SMP */
12518 #endif /* CONFIG_DEBUG_DCFLUSH */
12519 }
12520diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12521index 4f3006b..453f625f 100644
12522--- a/arch/tile/Kconfig
12523+++ b/arch/tile/Kconfig
12524@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12525
12526 config KEXEC
12527 bool "kexec system call"
12528+ depends on !GRKERNSEC_KMEM
12529 ---help---
12530 kexec is a system call that implements the ability to shutdown your
12531 current kernel, and to start another kernel. It is like a reboot
12532diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12533index 7b11c5f..755a026 100644
12534--- a/arch/tile/include/asm/atomic_64.h
12535+++ b/arch/tile/include/asm/atomic_64.h
12536@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12537
12538 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12539
12540+#define atomic64_read_unchecked(v) atomic64_read(v)
12541+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12542+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12543+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12544+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12545+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12546+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12547+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12548+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12549+
12550 /* Define this to indicate that cmpxchg is an efficient operation. */
12551 #define __HAVE_ARCH_CMPXCHG
12552
12553diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12554index 6160761..00cac88 100644
12555--- a/arch/tile/include/asm/cache.h
12556+++ b/arch/tile/include/asm/cache.h
12557@@ -15,11 +15,12 @@
12558 #ifndef _ASM_TILE_CACHE_H
12559 #define _ASM_TILE_CACHE_H
12560
12561+#include <linux/const.h>
12562 #include <arch/chip.h>
12563
12564 /* bytes per L1 data cache line */
12565 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12566-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12567+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12568
12569 /* bytes per L2 cache line */
12570 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12571diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12572index b6cde32..c0cb736 100644
12573--- a/arch/tile/include/asm/uaccess.h
12574+++ b/arch/tile/include/asm/uaccess.h
12575@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12576 const void __user *from,
12577 unsigned long n)
12578 {
12579- int sz = __compiletime_object_size(to);
12580+ size_t sz = __compiletime_object_size(to);
12581
12582- if (likely(sz == -1 || sz >= n))
12583+ if (likely(sz == (size_t)-1 || sz >= n))
12584 n = _copy_from_user(to, from, n);
12585 else
12586 copy_from_user_overflow();
12587diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12588index e514899..f8743c4 100644
12589--- a/arch/tile/mm/hugetlbpage.c
12590+++ b/arch/tile/mm/hugetlbpage.c
12591@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12592 info.high_limit = TASK_SIZE;
12593 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12594 info.align_offset = 0;
12595+ info.threadstack_offset = 0;
12596 return vm_unmapped_area(&info);
12597 }
12598
12599@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12600 info.high_limit = current->mm->mmap_base;
12601 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12602 info.align_offset = 0;
12603+ info.threadstack_offset = 0;
12604 addr = vm_unmapped_area(&info);
12605
12606 /*
12607diff --git a/arch/um/Makefile b/arch/um/Makefile
12608index e4b1a96..16162f8 100644
12609--- a/arch/um/Makefile
12610+++ b/arch/um/Makefile
12611@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12612 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12613 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12614
12615+ifdef CONSTIFY_PLUGIN
12616+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12617+endif
12618+
12619 #This will adjust *FLAGS accordingly to the platform.
12620 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12621
12622diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12623index 19e1bdd..3665b77 100644
12624--- a/arch/um/include/asm/cache.h
12625+++ b/arch/um/include/asm/cache.h
12626@@ -1,6 +1,7 @@
12627 #ifndef __UM_CACHE_H
12628 #define __UM_CACHE_H
12629
12630+#include <linux/const.h>
12631
12632 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12633 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12634@@ -12,6 +13,6 @@
12635 # define L1_CACHE_SHIFT 5
12636 #endif
12637
12638-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12639+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12640
12641 #endif
12642diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12643index 2e0a6b1..a64d0f5 100644
12644--- a/arch/um/include/asm/kmap_types.h
12645+++ b/arch/um/include/asm/kmap_types.h
12646@@ -8,6 +8,6 @@
12647
12648 /* No more #include "asm/arch/kmap_types.h" ! */
12649
12650-#define KM_TYPE_NR 14
12651+#define KM_TYPE_NR 15
12652
12653 #endif
12654diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12655index 5ff53d9..5850cdf 100644
12656--- a/arch/um/include/asm/page.h
12657+++ b/arch/um/include/asm/page.h
12658@@ -14,6 +14,9 @@
12659 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12660 #define PAGE_MASK (~(PAGE_SIZE-1))
12661
12662+#define ktla_ktva(addr) (addr)
12663+#define ktva_ktla(addr) (addr)
12664+
12665 #ifndef __ASSEMBLY__
12666
12667 struct page;
12668diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12669index 0032f92..cd151e0 100644
12670--- a/arch/um/include/asm/pgtable-3level.h
12671+++ b/arch/um/include/asm/pgtable-3level.h
12672@@ -58,6 +58,7 @@
12673 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12674 #define pud_populate(mm, pud, pmd) \
12675 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12676+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12677
12678 #ifdef CONFIG_64BIT
12679 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12680diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12681index f17bca8..48adb87 100644
12682--- a/arch/um/kernel/process.c
12683+++ b/arch/um/kernel/process.c
12684@@ -356,22 +356,6 @@ int singlestepping(void * t)
12685 return 2;
12686 }
12687
12688-/*
12689- * Only x86 and x86_64 have an arch_align_stack().
12690- * All other arches have "#define arch_align_stack(x) (x)"
12691- * in their asm/exec.h
12692- * As this is included in UML from asm-um/system-generic.h,
12693- * we can use it to behave as the subarch does.
12694- */
12695-#ifndef arch_align_stack
12696-unsigned long arch_align_stack(unsigned long sp)
12697-{
12698- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12699- sp -= get_random_int() % 8192;
12700- return sp & ~0xf;
12701-}
12702-#endif
12703-
12704 unsigned long get_wchan(struct task_struct *p)
12705 {
12706 unsigned long stack_page, sp, ip;
12707diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12708index ad8f795..2c7eec6 100644
12709--- a/arch/unicore32/include/asm/cache.h
12710+++ b/arch/unicore32/include/asm/cache.h
12711@@ -12,8 +12,10 @@
12712 #ifndef __UNICORE_CACHE_H__
12713 #define __UNICORE_CACHE_H__
12714
12715-#define L1_CACHE_SHIFT (5)
12716-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12717+#include <linux/const.h>
12718+
12719+#define L1_CACHE_SHIFT 5
12720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12721
12722 /*
12723 * Memory returned by kmalloc() may be used for DMA, so we must make
12724diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12725index d24887b..267d526 100644
12726--- a/arch/x86/Kconfig
12727+++ b/arch/x86/Kconfig
12728@@ -128,7 +128,7 @@ config X86
12729 select RTC_LIB
12730 select HAVE_DEBUG_STACKOVERFLOW
12731 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12732- select HAVE_CC_STACKPROTECTOR
12733+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12734 select GENERIC_CPU_AUTOPROBE
12735 select HAVE_ARCH_AUDITSYSCALL
12736 select ARCH_SUPPORTS_ATOMIC_RMW
12737@@ -253,7 +253,7 @@ config X86_HT
12738
12739 config X86_32_LAZY_GS
12740 def_bool y
12741- depends on X86_32 && !CC_STACKPROTECTOR
12742+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12743
12744 config ARCH_HWEIGHT_CFLAGS
12745 string
12746@@ -549,6 +549,7 @@ config SCHED_OMIT_FRAME_POINTER
12747
12748 menuconfig HYPERVISOR_GUEST
12749 bool "Linux guest support"
12750+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12751 ---help---
12752 Say Y here to enable options for running Linux under various hyper-
12753 visors. This option enables basic hypervisor detection and platform
12754@@ -1076,6 +1077,7 @@ choice
12755
12756 config NOHIGHMEM
12757 bool "off"
12758+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12759 ---help---
12760 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12761 However, the address space of 32-bit x86 processors is only 4
12762@@ -1112,6 +1114,7 @@ config NOHIGHMEM
12763
12764 config HIGHMEM4G
12765 bool "4GB"
12766+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12767 ---help---
12768 Select this if you have a 32-bit processor and between 1 and 4
12769 gigabytes of physical RAM.
12770@@ -1164,7 +1167,7 @@ config PAGE_OFFSET
12771 hex
12772 default 0xB0000000 if VMSPLIT_3G_OPT
12773 default 0x80000000 if VMSPLIT_2G
12774- default 0x78000000 if VMSPLIT_2G_OPT
12775+ default 0x70000000 if VMSPLIT_2G_OPT
12776 default 0x40000000 if VMSPLIT_1G
12777 default 0xC0000000
12778 depends on X86_32
12779@@ -1577,6 +1580,7 @@ source kernel/Kconfig.hz
12780
12781 config KEXEC
12782 bool "kexec system call"
12783+ depends on !GRKERNSEC_KMEM
12784 ---help---
12785 kexec is a system call that implements the ability to shutdown your
12786 current kernel, and to start another kernel. It is like a reboot
12787@@ -1727,7 +1731,9 @@ config X86_NEED_RELOCS
12788
12789 config PHYSICAL_ALIGN
12790 hex "Alignment value to which kernel should be aligned"
12791- default "0x200000"
12792+ default "0x1000000"
12793+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12794+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12795 range 0x2000 0x1000000 if X86_32
12796 range 0x200000 0x1000000 if X86_64
12797 ---help---
12798@@ -1810,6 +1816,7 @@ config COMPAT_VDSO
12799 def_bool n
12800 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12801 depends on X86_32 || IA32_EMULATION
12802+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12803 ---help---
12804 Certain buggy versions of glibc will crash if they are
12805 presented with a 32-bit vDSO that is not mapped at the address
12806diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12807index 6983314..54ad7e8 100644
12808--- a/arch/x86/Kconfig.cpu
12809+++ b/arch/x86/Kconfig.cpu
12810@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12811
12812 config X86_F00F_BUG
12813 def_bool y
12814- depends on M586MMX || M586TSC || M586 || M486
12815+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12816
12817 config X86_INVD_BUG
12818 def_bool y
12819@@ -327,7 +327,7 @@ config X86_INVD_BUG
12820
12821 config X86_ALIGNMENT_16
12822 def_bool y
12823- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12824+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12825
12826 config X86_INTEL_USERCOPY
12827 def_bool y
12828@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12829 # generates cmov.
12830 config X86_CMOV
12831 def_bool y
12832- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12833+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12834
12835 config X86_MINIMUM_CPU_FAMILY
12836 int
12837diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12838index 61bd2ad..50b625d 100644
12839--- a/arch/x86/Kconfig.debug
12840+++ b/arch/x86/Kconfig.debug
12841@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12842 config DEBUG_RODATA
12843 bool "Write protect kernel read-only data structures"
12844 default y
12845- depends on DEBUG_KERNEL
12846+ depends on DEBUG_KERNEL && BROKEN
12847 ---help---
12848 Mark the kernel read-only data as write-protected in the pagetables,
12849 in order to catch accidental (and incorrect) writes to such const
12850@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12851
12852 config DEBUG_SET_MODULE_RONX
12853 bool "Set loadable kernel module data as NX and text as RO"
12854- depends on MODULES
12855+ depends on MODULES && BROKEN
12856 ---help---
12857 This option helps catch unintended modifications to loadable
12858 kernel module's text and read-only data. It also prevents execution
12859diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12860index 33f71b0..c2cefa2 100644
12861--- a/arch/x86/Makefile
12862+++ b/arch/x86/Makefile
12863@@ -71,9 +71,6 @@ ifeq ($(CONFIG_X86_32),y)
12864 # CPU-specific tuning. Anything which can be shared with UML should go here.
12865 include $(srctree)/arch/x86/Makefile_32.cpu
12866 KBUILD_CFLAGS += $(cflags-y)
12867-
12868- # temporary until string.h is fixed
12869- KBUILD_CFLAGS += -ffreestanding
12870 else
12871 BITS := 64
12872 UTS_MACHINE := x86_64
12873@@ -114,6 +111,9 @@ else
12874 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12875 endif
12876
12877+# temporary until string.h is fixed
12878+KBUILD_CFLAGS += -ffreestanding
12879+
12880 # Make sure compiler does not have buggy stack-protector support.
12881 ifdef CONFIG_CC_STACKPROTECTOR
12882 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12883@@ -271,3 +271,12 @@ define archhelp
12884 echo ' FDINITRD=file initrd for the booted kernel'
12885 echo ' kvmconfig - Enable additional options for guest kernel support'
12886 endef
12887+
12888+define OLD_LD
12889+
12890+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12891+*** Please upgrade your binutils to 2.18 or newer
12892+endef
12893+
12894+archprepare:
12895+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12896diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12897index dbe8dd2..2f0a98f 100644
12898--- a/arch/x86/boot/Makefile
12899+++ b/arch/x86/boot/Makefile
12900@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
12901 # ---------------------------------------------------------------------------
12902
12903 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12904+ifdef CONSTIFY_PLUGIN
12905+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12906+endif
12907 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12908 GCOV_PROFILE := n
12909
12910diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12911index 878e4b9..20537ab 100644
12912--- a/arch/x86/boot/bitops.h
12913+++ b/arch/x86/boot/bitops.h
12914@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12915 u8 v;
12916 const u32 *p = (const u32 *)addr;
12917
12918- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12919+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12920 return v;
12921 }
12922
12923@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12924
12925 static inline void set_bit(int nr, void *addr)
12926 {
12927- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12928+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12929 }
12930
12931 #endif /* BOOT_BITOPS_H */
12932diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12933index bd49ec6..94c7f58 100644
12934--- a/arch/x86/boot/boot.h
12935+++ b/arch/x86/boot/boot.h
12936@@ -84,7 +84,7 @@ static inline void io_delay(void)
12937 static inline u16 ds(void)
12938 {
12939 u16 seg;
12940- asm("movw %%ds,%0" : "=rm" (seg));
12941+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12942 return seg;
12943 }
12944
12945diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12946index 0fcd913..3bb5c42 100644
12947--- a/arch/x86/boot/compressed/Makefile
12948+++ b/arch/x86/boot/compressed/Makefile
12949@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
12950 KBUILD_CFLAGS += -mno-mmx -mno-sse
12951 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12952 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12953+ifdef CONSTIFY_PLUGIN
12954+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12955+endif
12956
12957 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12958 GCOV_PROFILE := n
12959diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12960index a53440e..c3dbf1e 100644
12961--- a/arch/x86/boot/compressed/efi_stub_32.S
12962+++ b/arch/x86/boot/compressed/efi_stub_32.S
12963@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12964 * parameter 2, ..., param n. To make things easy, we save the return
12965 * address of efi_call_phys in a global variable.
12966 */
12967- popl %ecx
12968- movl %ecx, saved_return_addr(%edx)
12969- /* get the function pointer into ECX*/
12970- popl %ecx
12971- movl %ecx, efi_rt_function_ptr(%edx)
12972+ popl saved_return_addr(%edx)
12973+ popl efi_rt_function_ptr(%edx)
12974
12975 /*
12976 * 3. Call the physical function.
12977 */
12978- call *%ecx
12979+ call *efi_rt_function_ptr(%edx)
12980
12981 /*
12982 * 4. Balance the stack. And because EAX contain the return value,
12983@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12984 1: popl %edx
12985 subl $1b, %edx
12986
12987- movl efi_rt_function_ptr(%edx), %ecx
12988- pushl %ecx
12989+ pushl efi_rt_function_ptr(%edx)
12990
12991 /*
12992 * 10. Push the saved return address onto the stack and return.
12993 */
12994- movl saved_return_addr(%edx), %ecx
12995- pushl %ecx
12996- ret
12997+ jmpl *saved_return_addr(%edx)
12998 ENDPROC(efi_call_phys)
12999 .previous
13000
13001diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
13002index cbed140..5f2ca57 100644
13003--- a/arch/x86/boot/compressed/head_32.S
13004+++ b/arch/x86/boot/compressed/head_32.S
13005@@ -140,10 +140,10 @@ preferred_addr:
13006 addl %eax, %ebx
13007 notl %eax
13008 andl %eax, %ebx
13009- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13010+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13011 jge 1f
13012 #endif
13013- movl $LOAD_PHYSICAL_ADDR, %ebx
13014+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13015 1:
13016
13017 /* Target address to relocate to for decompression */
13018diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
13019index 2884e0c..904a2f7 100644
13020--- a/arch/x86/boot/compressed/head_64.S
13021+++ b/arch/x86/boot/compressed/head_64.S
13022@@ -94,10 +94,10 @@ ENTRY(startup_32)
13023 addl %eax, %ebx
13024 notl %eax
13025 andl %eax, %ebx
13026- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13027+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13028 jge 1f
13029 #endif
13030- movl $LOAD_PHYSICAL_ADDR, %ebx
13031+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13032 1:
13033
13034 /* Target address to relocate to for decompression */
13035@@ -322,10 +322,10 @@ preferred_addr:
13036 addq %rax, %rbp
13037 notq %rax
13038 andq %rax, %rbp
13039- cmpq $LOAD_PHYSICAL_ADDR, %rbp
13040+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
13041 jge 1f
13042 #endif
13043- movq $LOAD_PHYSICAL_ADDR, %rbp
13044+ movq $____LOAD_PHYSICAL_ADDR, %rbp
13045 1:
13046
13047 /* Target address to relocate to for decompression */
13048@@ -431,8 +431,8 @@ gdt:
13049 .long gdt
13050 .word 0
13051 .quad 0x0000000000000000 /* NULL descriptor */
13052- .quad 0x00af9a000000ffff /* __KERNEL_CS */
13053- .quad 0x00cf92000000ffff /* __KERNEL_DS */
13054+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13055+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13056 .quad 0x0080890000000000 /* TS descriptor */
13057 .quad 0x0000000000000000 /* TS continued */
13058 gdt_end:
13059diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
13060index 57ab74d..7c52182 100644
13061--- a/arch/x86/boot/compressed/misc.c
13062+++ b/arch/x86/boot/compressed/misc.c
13063@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
13064 * Calculate the delta between where vmlinux was linked to load
13065 * and where it was actually loaded.
13066 */
13067- delta = min_addr - LOAD_PHYSICAL_ADDR;
13068+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
13069 if (!delta) {
13070 debug_putstr("No relocation needed... ");
13071 return;
13072@@ -312,7 +312,7 @@ static void parse_elf(void *output)
13073 Elf32_Ehdr ehdr;
13074 Elf32_Phdr *phdrs, *phdr;
13075 #endif
13076- void *dest;
13077+ void *dest, *prev;
13078 int i;
13079
13080 memcpy(&ehdr, output, sizeof(ehdr));
13081@@ -339,13 +339,16 @@ static void parse_elf(void *output)
13082 case PT_LOAD:
13083 #ifdef CONFIG_RELOCATABLE
13084 dest = output;
13085- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13086+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13087 #else
13088 dest = (void *)(phdr->p_paddr);
13089 #endif
13090 memcpy(dest,
13091 output + phdr->p_offset,
13092 phdr->p_filesz);
13093+ if (i)
13094+ memset(prev, 0xff, dest - prev);
13095+ prev = dest + phdr->p_filesz;
13096 break;
13097 default: /* Ignore other PT_* */ break;
13098 }
13099@@ -395,7 +398,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
13100 error("Destination address too large");
13101 #endif
13102 #ifndef CONFIG_RELOCATABLE
13103- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13104+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13105 error("Wrong destination address");
13106 #endif
13107
13108diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13109index 1fd7d57..0f7d096 100644
13110--- a/arch/x86/boot/cpucheck.c
13111+++ b/arch/x86/boot/cpucheck.c
13112@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13113 u32 ecx = MSR_K7_HWCR;
13114 u32 eax, edx;
13115
13116- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13117+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13118 eax &= ~(1 << 15);
13119- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13120+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13121
13122 get_cpuflags(); /* Make sure it really did something */
13123 err = check_cpuflags();
13124@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13125 u32 ecx = MSR_VIA_FCR;
13126 u32 eax, edx;
13127
13128- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13129+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13130 eax |= (1<<1)|(1<<7);
13131- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13132+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13133
13134 set_bit(X86_FEATURE_CX8, cpu.flags);
13135 err = check_cpuflags();
13136@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13137 u32 eax, edx;
13138 u32 level = 1;
13139
13140- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13141- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13142- asm("cpuid"
13143+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13144+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13145+ asm volatile("cpuid"
13146 : "+a" (level), "=d" (cpu.flags[0])
13147 : : "ecx", "ebx");
13148- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13149+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13150
13151 err = check_cpuflags();
13152 } else if (err == 0x01 &&
13153diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13154index 7a6d43a..edf6e40 100644
13155--- a/arch/x86/boot/header.S
13156+++ b/arch/x86/boot/header.S
13157@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13158 # single linked list of
13159 # struct setup_data
13160
13161-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13162+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13163
13164 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13165+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13166+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13167+#else
13168 #define VO_INIT_SIZE (VO__end - VO__text)
13169+#endif
13170 #if ZO_INIT_SIZE > VO_INIT_SIZE
13171 #define INIT_SIZE ZO_INIT_SIZE
13172 #else
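
On the header.S hunk above: VO_INIT_SIZE must be a byte count derived from two vmlinux linker symbols. The formula implies that with CONFIG_X86_32 plus CONFIG_PAX_KERNEXEC the two symbols are no longer linked against a common base, so the plain difference carries a constant __PAGE_OFFSET + ____LOAD_PHYSICAL_ADDR term that has to be subtracted back out; this is a reading of the arithmetic, not a description of the KERNEXEC layout itself:

	VO_INIT_SIZE = VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR

so if, say, VO__end = __PAGE_OFFSET + ____LOAD_PHYSICAL_ADDR + image_size while VO__text is zero-based, the expression reduces to image_size, as required for the ZO_INIT_SIZE comparison that follows.
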
13173diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13174index db75d07..8e6d0af 100644
13175--- a/arch/x86/boot/memory.c
13176+++ b/arch/x86/boot/memory.c
13177@@ -19,7 +19,7 @@
13178
13179 static int detect_memory_e820(void)
13180 {
13181- int count = 0;
13182+ unsigned int count = 0;
13183 struct biosregs ireg, oreg;
13184 struct e820entry *desc = boot_params.e820_map;
13185 static struct e820entry buf; /* static so it is zeroed */
13186diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13187index ba3e100..6501b8f 100644
13188--- a/arch/x86/boot/video-vesa.c
13189+++ b/arch/x86/boot/video-vesa.c
13190@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13191
13192 boot_params.screen_info.vesapm_seg = oreg.es;
13193 boot_params.screen_info.vesapm_off = oreg.di;
13194+ boot_params.screen_info.vesapm_size = oreg.cx;
13195 }
13196
13197 /*
13198diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13199index 43eda28..5ab5fdb 100644
13200--- a/arch/x86/boot/video.c
13201+++ b/arch/x86/boot/video.c
13202@@ -96,7 +96,7 @@ static void store_mode_params(void)
13203 static unsigned int get_entry(void)
13204 {
13205 char entry_buf[4];
13206- int i, len = 0;
13207+ unsigned int i, len = 0;
13208 int key;
13209 unsigned int v;
13210
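
The detect_memory_e820() and get_entry() hunks above retype counters from int to unsigned int, a pattern this patch applies throughout: the values are never meaningfully negative, they end up compared against or converted to unsigned quantities, and signed overflow is undefined behaviour. A small illustrative case; count_entries is a made-up function.

	/* Why a signed counter is the wrong type for an index/count. */
	#include <stddef.h>

	static size_t count_entries(const int *tbl, size_t max)
	{
		unsigned int count = 0;	/* was: int count = 0; */

		/* in "count < max" a signed count would be converted to
		 * the unsigned size_t, so a negative value would compare
		 * as a huge number; and overflowing a signed count on ++
		 * is undefined behaviour to begin with */
		while (count < max && tbl[count] != 0)
			count++;

		return count;
	}
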
13211diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13212index 9105655..41779c1 100644
13213--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13214+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13215@@ -8,6 +8,8 @@
13216 * including this sentence is retained in full.
13217 */
13218
13219+#include <asm/alternative-asm.h>
13220+
13221 .extern crypto_ft_tab
13222 .extern crypto_it_tab
13223 .extern crypto_fl_tab
13224@@ -70,6 +72,8 @@
13225 je B192; \
13226 leaq 32(r9),r9;
13227
13228+#define ret pax_force_retaddr; ret
13229+
13230 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13231 movq r1,r2; \
13232 movq r3,r4; \
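
The #define ret line above is a preprocessor trick: this .S file passes through cpp and its function bodies are generated by large assembler macros, so redefining ret instruments every epilogue below that point without editing each macro. pax_force_retaddr comes from the newly included asm/alternative-asm.h; its body is outside this excerpt, but under PaX KERNEXEC it is understood to rewrite the saved return address on the stack just before ret consumes it, forcing the address back into the kernel range. A hedged sketch of one assumed expansion, the "or" method, which masks the return slot with a globally reserved register:

	/* Sketch only, not the PaX macro: pin the saved return address
	 * into the kernel address range by OR-ing in a mask held in a
	 * reserved register (%r12 is assumed here). */
	#define PAX_FORCE_RETADDR_SKETCH() \
		asm volatile("orq %%r12, (%%rsp)" ::: "memory", "cc")
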
13233diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13234index 477e9d7..c92c7d8 100644
13235--- a/arch/x86/crypto/aesni-intel_asm.S
13236+++ b/arch/x86/crypto/aesni-intel_asm.S
13237@@ -31,6 +31,7 @@
13238
13239 #include <linux/linkage.h>
13240 #include <asm/inst.h>
13241+#include <asm/alternative-asm.h>
13242
13243 #ifdef __x86_64__
13244 .data
13245@@ -205,7 +206,7 @@ enc: .octa 0x2
13246 * num_initial_blocks = b mod 4
13247 * encrypt the initial num_initial_blocks blocks and apply ghash on
13248 * the ciphertext
13249-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13250+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13251 * are clobbered
13252 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13253 */
13254@@ -214,8 +215,8 @@ enc: .octa 0x2
13255 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13256 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13257 mov arg7, %r10 # %r10 = AAD
13258- mov arg8, %r12 # %r12 = aadLen
13259- mov %r12, %r11
13260+ mov arg8, %r15 # %r15 = aadLen
13261+ mov %r15, %r11
13262 pxor %xmm\i, %xmm\i
13263 _get_AAD_loop\num_initial_blocks\operation:
13264 movd (%r10), \TMP1
13265@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13266 psrldq $4, %xmm\i
13267 pxor \TMP1, %xmm\i
13268 add $4, %r10
13269- sub $4, %r12
13270+ sub $4, %r15
13271 jne _get_AAD_loop\num_initial_blocks\operation
13272 cmp $16, %r11
13273 je _get_AAD_loop2_done\num_initial_blocks\operation
13274- mov $16, %r12
13275+ mov $16, %r15
13276 _get_AAD_loop2\num_initial_blocks\operation:
13277 psrldq $4, %xmm\i
13278- sub $4, %r12
13279- cmp %r11, %r12
13280+ sub $4, %r15
13281+ cmp %r11, %r15
13282 jne _get_AAD_loop2\num_initial_blocks\operation
13283 _get_AAD_loop2_done\num_initial_blocks\operation:
13284 movdqa SHUF_MASK(%rip), %xmm14
13285@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13286 * num_initial_blocks = b mod 4
13287 * encrypt the initial num_initial_blocks blocks and apply ghash on
13288 * the ciphertext
13289-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13290+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13291 * are clobbered
13292 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13293 */
13294@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13295 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13296 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13297 mov arg7, %r10 # %r10 = AAD
13298- mov arg8, %r12 # %r12 = aadLen
13299- mov %r12, %r11
13300+ mov arg8, %r15 # %r15 = aadLen
13301+ mov %r15, %r11
13302 pxor %xmm\i, %xmm\i
13303 _get_AAD_loop\num_initial_blocks\operation:
13304 movd (%r10), \TMP1
13305@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13306 psrldq $4, %xmm\i
13307 pxor \TMP1, %xmm\i
13308 add $4, %r10
13309- sub $4, %r12
13310+ sub $4, %r15
13311 jne _get_AAD_loop\num_initial_blocks\operation
13312 cmp $16, %r11
13313 je _get_AAD_loop2_done\num_initial_blocks\operation
13314- mov $16, %r12
13315+ mov $16, %r15
13316 _get_AAD_loop2\num_initial_blocks\operation:
13317 psrldq $4, %xmm\i
13318- sub $4, %r12
13319- cmp %r11, %r12
13320+ sub $4, %r15
13321+ cmp %r11, %r15
13322 jne _get_AAD_loop2\num_initial_blocks\operation
13323 _get_AAD_loop2_done\num_initial_blocks\operation:
13324 movdqa SHUF_MASK(%rip), %xmm14
13325@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13326 *
13327 *****************************************************************************/
13328 ENTRY(aesni_gcm_dec)
13329- push %r12
13330+ push %r15
13331 push %r13
13332 push %r14
13333 mov %rsp, %r14
13334@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13335 */
13336 sub $VARIABLE_OFFSET, %rsp
13337 and $~63, %rsp # align rsp to 64 bytes
13338- mov %arg6, %r12
13339- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13340+ mov %arg6, %r15
13341+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13342 movdqa SHUF_MASK(%rip), %xmm2
13343 PSHUFB_XMM %xmm2, %xmm13
13344
13345@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13346 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13347 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13348 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13349- mov %r13, %r12
13350- and $(3<<4), %r12
13351+ mov %r13, %r15
13352+ and $(3<<4), %r15
13353 jz _initial_num_blocks_is_0_decrypt
13354- cmp $(2<<4), %r12
13355+ cmp $(2<<4), %r15
13356 jb _initial_num_blocks_is_1_decrypt
13357 je _initial_num_blocks_is_2_decrypt
13358 _initial_num_blocks_is_3_decrypt:
13359@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13360 sub $16, %r11
13361 add %r13, %r11
13362 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13363- lea SHIFT_MASK+16(%rip), %r12
13364- sub %r13, %r12
13365+ lea SHIFT_MASK+16(%rip), %r15
13366+ sub %r13, %r15
13367 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13368 # (%r13 is the number of bytes in plaintext mod 16)
13369- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13370+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13371 	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 bytes
13372
13373 movdqa %xmm1, %xmm2
13374 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13375- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13376+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13377 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13378 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13379 pand %xmm1, %xmm2
13380@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13381 sub $1, %r13
13382 jne _less_than_8_bytes_left_decrypt
13383 _multiple_of_16_bytes_decrypt:
13384- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13385- shl $3, %r12 # convert into number of bits
13386- movd %r12d, %xmm15 # len(A) in %xmm15
13387+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13388+ shl $3, %r15 # convert into number of bits
13389+ movd %r15d, %xmm15 # len(A) in %xmm15
13390 shl $3, %arg4 # len(C) in bits (*128)
13391 MOVQ_R64_XMM %arg4, %xmm1
13392 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13393@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13394 mov %r14, %rsp
13395 pop %r14
13396 pop %r13
13397- pop %r12
13398+ pop %r15
13399+ pax_force_retaddr
13400 ret
13401 ENDPROC(aesni_gcm_dec)
13402
13403@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13404 * poly = x^128 + x^127 + x^126 + x^121 + 1
13405 ***************************************************************************/
13406 ENTRY(aesni_gcm_enc)
13407- push %r12
13408+ push %r15
13409 push %r13
13410 push %r14
13411 mov %rsp, %r14
13412@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13413 #
13414 sub $VARIABLE_OFFSET, %rsp
13415 and $~63, %rsp
13416- mov %arg6, %r12
13417- movdqu (%r12), %xmm13
13418+ mov %arg6, %r15
13419+ movdqu (%r15), %xmm13
13420 movdqa SHUF_MASK(%rip), %xmm2
13421 PSHUFB_XMM %xmm2, %xmm13
13422
13423@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13424 movdqa %xmm13, HashKey(%rsp)
13425 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13426 and $-16, %r13
13427- mov %r13, %r12
13428+ mov %r13, %r15
13429
13430 # Encrypt first few blocks
13431
13432- and $(3<<4), %r12
13433+ and $(3<<4), %r15
13434 jz _initial_num_blocks_is_0_encrypt
13435- cmp $(2<<4), %r12
13436+ cmp $(2<<4), %r15
13437 jb _initial_num_blocks_is_1_encrypt
13438 je _initial_num_blocks_is_2_encrypt
13439 _initial_num_blocks_is_3_encrypt:
13440@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13441 sub $16, %r11
13442 add %r13, %r11
13443 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13444- lea SHIFT_MASK+16(%rip), %r12
13445- sub %r13, %r12
13446+ lea SHIFT_MASK+16(%rip), %r15
13447+ sub %r13, %r15
13448 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13449 # (%r13 is the number of bytes in plaintext mod 16)
13450- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13451+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13452 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13453 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13454- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13455+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13456 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13457 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13458 movdqa SHUF_MASK(%rip), %xmm10
13459@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13460 sub $1, %r13
13461 jne _less_than_8_bytes_left_encrypt
13462 _multiple_of_16_bytes_encrypt:
13463- mov arg8, %r12 # %r12 = addLen (number of bytes)
13464- shl $3, %r12
13465- movd %r12d, %xmm15 # len(A) in %xmm15
13466+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13467+ shl $3, %r15
13468+ movd %r15d, %xmm15 # len(A) in %xmm15
13469 shl $3, %arg4 # len(C) in bits (*128)
13470 MOVQ_R64_XMM %arg4, %xmm1
13471 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13472@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13473 mov %r14, %rsp
13474 pop %r14
13475 pop %r13
13476- pop %r12
13477+ pop %r15
13478+ pax_force_retaddr
13479 ret
13480 ENDPROC(aesni_gcm_enc)
13481
13482@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13483 pxor %xmm1, %xmm0
13484 movaps %xmm0, (TKEYP)
13485 add $0x10, TKEYP
13486+ pax_force_retaddr
13487 ret
13488 ENDPROC(_key_expansion_128)
13489 ENDPROC(_key_expansion_256a)
13490@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13491 shufps $0b01001110, %xmm2, %xmm1
13492 movaps %xmm1, 0x10(TKEYP)
13493 add $0x20, TKEYP
13494+ pax_force_retaddr
13495 ret
13496 ENDPROC(_key_expansion_192a)
13497
13498@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13499
13500 movaps %xmm0, (TKEYP)
13501 add $0x10, TKEYP
13502+ pax_force_retaddr
13503 ret
13504 ENDPROC(_key_expansion_192b)
13505
13506@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13507 pxor %xmm1, %xmm2
13508 movaps %xmm2, (TKEYP)
13509 add $0x10, TKEYP
13510+ pax_force_retaddr
13511 ret
13512 ENDPROC(_key_expansion_256b)
13513
13514@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13515 #ifndef __x86_64__
13516 popl KEYP
13517 #endif
13518+ pax_force_retaddr
13519 ret
13520 ENDPROC(aesni_set_key)
13521
13522@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13523 popl KLEN
13524 popl KEYP
13525 #endif
13526+ pax_force_retaddr
13527 ret
13528 ENDPROC(aesni_enc)
13529
13530@@ -1974,6 +1983,7 @@ _aesni_enc1:
13531 AESENC KEY STATE
13532 movaps 0x70(TKEYP), KEY
13533 AESENCLAST KEY STATE
13534+ pax_force_retaddr
13535 ret
13536 ENDPROC(_aesni_enc1)
13537
13538@@ -2083,6 +2093,7 @@ _aesni_enc4:
13539 AESENCLAST KEY STATE2
13540 AESENCLAST KEY STATE3
13541 AESENCLAST KEY STATE4
13542+ pax_force_retaddr
13543 ret
13544 ENDPROC(_aesni_enc4)
13545
13546@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13547 popl KLEN
13548 popl KEYP
13549 #endif
13550+ pax_force_retaddr
13551 ret
13552 ENDPROC(aesni_dec)
13553
13554@@ -2164,6 +2176,7 @@ _aesni_dec1:
13555 AESDEC KEY STATE
13556 movaps 0x70(TKEYP), KEY
13557 AESDECLAST KEY STATE
13558+ pax_force_retaddr
13559 ret
13560 ENDPROC(_aesni_dec1)
13561
13562@@ -2273,6 +2286,7 @@ _aesni_dec4:
13563 AESDECLAST KEY STATE2
13564 AESDECLAST KEY STATE3
13565 AESDECLAST KEY STATE4
13566+ pax_force_retaddr
13567 ret
13568 ENDPROC(_aesni_dec4)
13569
13570@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13571 popl KEYP
13572 popl LEN
13573 #endif
13574+ pax_force_retaddr
13575 ret
13576 ENDPROC(aesni_ecb_enc)
13577
13578@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13579 popl KEYP
13580 popl LEN
13581 #endif
13582+ pax_force_retaddr
13583 ret
13584 ENDPROC(aesni_ecb_dec)
13585
13586@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13587 popl LEN
13588 popl IVP
13589 #endif
13590+ pax_force_retaddr
13591 ret
13592 ENDPROC(aesni_cbc_enc)
13593
13594@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13595 popl LEN
13596 popl IVP
13597 #endif
13598+ pax_force_retaddr
13599 ret
13600 ENDPROC(aesni_cbc_dec)
13601
13602@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13603 mov $1, TCTR_LOW
13604 MOVQ_R64_XMM TCTR_LOW INC
13605 MOVQ_R64_XMM CTR TCTR_LOW
13606+ pax_force_retaddr
13607 ret
13608 ENDPROC(_aesni_inc_init)
13609
13610@@ -2579,6 +2598,7 @@ _aesni_inc:
13611 .Linc_low:
13612 movaps CTR, IV
13613 PSHUFB_XMM BSWAP_MASK IV
13614+ pax_force_retaddr
13615 ret
13616 ENDPROC(_aesni_inc)
13617
13618@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13619 .Lctr_enc_ret:
13620 movups IV, (IVP)
13621 .Lctr_enc_just_ret:
13622+ pax_force_retaddr
13623 ret
13624 ENDPROC(aesni_ctr_enc)
13625
13626@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13627 pxor INC, STATE4
13628 movdqu STATE4, 0x70(OUTP)
13629
13630+ pax_force_retaddr
13631 ret
13632 ENDPROC(aesni_xts_crypt8)
13633
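A second pattern begins in aesni-intel_asm.S above and repeats through the crypto files that follow: every use of %r12 is mechanically migrated to %r15 (or %r14 where that one is free), push/pop pairs and comments included. The assumed reason, consistent with the pax_force_retaddr sketch earlier, is that the KERNEXEC "or" method reserves %r12 kernel-wide for the return-address mask, so hand-written assembly must not clobber it. The C-level equivalent of such a reservation is a fixed global register variable:

	/* Sketch: pinning a register globally, as KERNEXEC's "or" method
	 * is assumed to do with %r12.  Compiler-generated code then never
	 * allocates it; hand-written .S files have to be fixed up by
	 * hand, which is what the renames in these files do. */
	register unsigned long pax_retaddr_mask asm("r12");
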
13634diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13635index 246c670..466e2d6 100644
13636--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13637+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13638@@ -21,6 +21,7 @@
13639 */
13640
13641 #include <linux/linkage.h>
13642+#include <asm/alternative-asm.h>
13643
13644 .file "blowfish-x86_64-asm.S"
13645 .text
13646@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13647 jnz .L__enc_xor;
13648
13649 write_block();
13650+ pax_force_retaddr
13651 ret;
13652 .L__enc_xor:
13653 xor_block();
13654+ pax_force_retaddr
13655 ret;
13656 ENDPROC(__blowfish_enc_blk)
13657
13658@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13659
13660 movq %r11, %rbp;
13661
13662+ pax_force_retaddr
13663 ret;
13664 ENDPROC(blowfish_dec_blk)
13665
13666@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13667
13668 popq %rbx;
13669 popq %rbp;
13670+ pax_force_retaddr
13671 ret;
13672
13673 .L__enc_xor4:
13674@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13675
13676 popq %rbx;
13677 popq %rbp;
13678+ pax_force_retaddr
13679 ret;
13680 ENDPROC(__blowfish_enc_blk_4way)
13681
13682@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13683 popq %rbx;
13684 popq %rbp;
13685
13686+ pax_force_retaddr
13687 ret;
13688 ENDPROC(blowfish_dec_blk_4way)
13689diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13690index ce71f92..1dce7ec 100644
13691--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13692+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13693@@ -16,6 +16,7 @@
13694 */
13695
13696 #include <linux/linkage.h>
13697+#include <asm/alternative-asm.h>
13698
13699 #define CAMELLIA_TABLE_BYTE_LEN 272
13700
13701@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13702 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13703 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13704 %rcx, (%r9));
13705+ pax_force_retaddr
13706 ret;
13707 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13708
13709@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13710 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13711 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13712 %rax, (%r9));
13713+ pax_force_retaddr
13714 ret;
13715 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13716
13717@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13718 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13719 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13720
13721+ pax_force_retaddr
13722 ret;
13723
13724 .align 8
13725@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13726 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13727 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13728
13729+ pax_force_retaddr
13730 ret;
13731
13732 .align 8
13733@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13734 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13735 %xmm8, %rsi);
13736
13737+ pax_force_retaddr
13738 ret;
13739 ENDPROC(camellia_ecb_enc_16way)
13740
13741@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13742 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13743 %xmm8, %rsi);
13744
13745+ pax_force_retaddr
13746 ret;
13747 ENDPROC(camellia_ecb_dec_16way)
13748
13749@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13750 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13751 %xmm8, %rsi);
13752
13753+ pax_force_retaddr
13754 ret;
13755 ENDPROC(camellia_cbc_dec_16way)
13756
13757@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13758 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13759 %xmm8, %rsi);
13760
13761+ pax_force_retaddr
13762 ret;
13763 ENDPROC(camellia_ctr_16way)
13764
13765@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13766 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13767 %xmm8, %rsi);
13768
13769+ pax_force_retaddr
13770 ret;
13771 ENDPROC(camellia_xts_crypt_16way)
13772
13773diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13774index 0e0b886..5a3123c 100644
13775--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13776+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13777@@ -11,6 +11,7 @@
13778 */
13779
13780 #include <linux/linkage.h>
13781+#include <asm/alternative-asm.h>
13782
13783 #define CAMELLIA_TABLE_BYTE_LEN 272
13784
13785@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13786 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13787 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13788 %rcx, (%r9));
13789+ pax_force_retaddr
13790 ret;
13791 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13792
13793@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13794 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13795 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13796 %rax, (%r9));
13797+ pax_force_retaddr
13798 ret;
13799 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13800
13801@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13802 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13803 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13804
13805+ pax_force_retaddr
13806 ret;
13807
13808 .align 8
13809@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13810 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13811 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13812
13813+ pax_force_retaddr
13814 ret;
13815
13816 .align 8
13817@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13818
13819 vzeroupper;
13820
13821+ pax_force_retaddr
13822 ret;
13823 ENDPROC(camellia_ecb_enc_32way)
13824
13825@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13826
13827 vzeroupper;
13828
13829+ pax_force_retaddr
13830 ret;
13831 ENDPROC(camellia_ecb_dec_32way)
13832
13833@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13834
13835 vzeroupper;
13836
13837+ pax_force_retaddr
13838 ret;
13839 ENDPROC(camellia_cbc_dec_32way)
13840
13841@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13842
13843 vzeroupper;
13844
13845+ pax_force_retaddr
13846 ret;
13847 ENDPROC(camellia_ctr_32way)
13848
13849@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13850
13851 vzeroupper;
13852
13853+ pax_force_retaddr
13854 ret;
13855 ENDPROC(camellia_xts_crypt_32way)
13856
13857diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13858index 310319c..db3d7b5 100644
13859--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13860+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13861@@ -21,6 +21,7 @@
13862 */
13863
13864 #include <linux/linkage.h>
13865+#include <asm/alternative-asm.h>
13866
13867 .file "camellia-x86_64-asm_64.S"
13868 .text
13869@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13870 enc_outunpack(mov, RT1);
13871
13872 movq RRBP, %rbp;
13873+ pax_force_retaddr
13874 ret;
13875
13876 .L__enc_xor:
13877 enc_outunpack(xor, RT1);
13878
13879 movq RRBP, %rbp;
13880+ pax_force_retaddr
13881 ret;
13882 ENDPROC(__camellia_enc_blk)
13883
13884@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13885 dec_outunpack();
13886
13887 movq RRBP, %rbp;
13888+ pax_force_retaddr
13889 ret;
13890 ENDPROC(camellia_dec_blk)
13891
13892@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13893
13894 movq RRBP, %rbp;
13895 popq %rbx;
13896+ pax_force_retaddr
13897 ret;
13898
13899 .L__enc2_xor:
13900@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13901
13902 movq RRBP, %rbp;
13903 popq %rbx;
13904+ pax_force_retaddr
13905 ret;
13906 ENDPROC(__camellia_enc_blk_2way)
13907
13908@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13909
13910 movq RRBP, %rbp;
13911 movq RXOR, %rbx;
13912+ pax_force_retaddr
13913 ret;
13914 ENDPROC(camellia_dec_blk_2way)
13915diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13916index c35fd5d..2d8c7db 100644
13917--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13918+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13919@@ -24,6 +24,7 @@
13920 */
13921
13922 #include <linux/linkage.h>
13923+#include <asm/alternative-asm.h>
13924
13925 .file "cast5-avx-x86_64-asm_64.S"
13926
13927@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13928 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13929 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13930
13931+ pax_force_retaddr
13932 ret;
13933 ENDPROC(__cast5_enc_blk16)
13934
13935@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13936 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13937 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13938
13939+ pax_force_retaddr
13940 ret;
13941
13942 .L__skip_dec:
13943@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13944 vmovdqu RR4, (6*4*4)(%r11);
13945 vmovdqu RL4, (7*4*4)(%r11);
13946
13947+ pax_force_retaddr
13948 ret;
13949 ENDPROC(cast5_ecb_enc_16way)
13950
13951@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13952 vmovdqu RR4, (6*4*4)(%r11);
13953 vmovdqu RL4, (7*4*4)(%r11);
13954
13955+ pax_force_retaddr
13956 ret;
13957 ENDPROC(cast5_ecb_dec_16way)
13958
13959@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13960 * %rdx: src
13961 */
13962
13963- pushq %r12;
13964+ pushq %r14;
13965
13966 movq %rsi, %r11;
13967- movq %rdx, %r12;
13968+ movq %rdx, %r14;
13969
13970 vmovdqu (0*16)(%rdx), RL1;
13971 vmovdqu (1*16)(%rdx), RR1;
13972@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13973 call __cast5_dec_blk16;
13974
13975 /* xor with src */
13976- vmovq (%r12), RX;
13977+ vmovq (%r14), RX;
13978 vpshufd $0x4f, RX, RX;
13979 vpxor RX, RR1, RR1;
13980- vpxor 0*16+8(%r12), RL1, RL1;
13981- vpxor 1*16+8(%r12), RR2, RR2;
13982- vpxor 2*16+8(%r12), RL2, RL2;
13983- vpxor 3*16+8(%r12), RR3, RR3;
13984- vpxor 4*16+8(%r12), RL3, RL3;
13985- vpxor 5*16+8(%r12), RR4, RR4;
13986- vpxor 6*16+8(%r12), RL4, RL4;
13987+ vpxor 0*16+8(%r14), RL1, RL1;
13988+ vpxor 1*16+8(%r14), RR2, RR2;
13989+ vpxor 2*16+8(%r14), RL2, RL2;
13990+ vpxor 3*16+8(%r14), RR3, RR3;
13991+ vpxor 4*16+8(%r14), RL3, RL3;
13992+ vpxor 5*16+8(%r14), RR4, RR4;
13993+ vpxor 6*16+8(%r14), RL4, RL4;
13994
13995 vmovdqu RR1, (0*16)(%r11);
13996 vmovdqu RL1, (1*16)(%r11);
13997@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13998 vmovdqu RR4, (6*16)(%r11);
13999 vmovdqu RL4, (7*16)(%r11);
14000
14001- popq %r12;
14002+ popq %r14;
14003
14004+ pax_force_retaddr
14005 ret;
14006 ENDPROC(cast5_cbc_dec_16way)
14007
14008@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
14009 * %rcx: iv (big endian, 64bit)
14010 */
14011
14012- pushq %r12;
14013+ pushq %r14;
14014
14015 movq %rsi, %r11;
14016- movq %rdx, %r12;
14017+ movq %rdx, %r14;
14018
14019 vpcmpeqd RTMP, RTMP, RTMP;
14020 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
14021@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
14022 call __cast5_enc_blk16;
14023
14024 /* dst = src ^ iv */
14025- vpxor (0*16)(%r12), RR1, RR1;
14026- vpxor (1*16)(%r12), RL1, RL1;
14027- vpxor (2*16)(%r12), RR2, RR2;
14028- vpxor (3*16)(%r12), RL2, RL2;
14029- vpxor (4*16)(%r12), RR3, RR3;
14030- vpxor (5*16)(%r12), RL3, RL3;
14031- vpxor (6*16)(%r12), RR4, RR4;
14032- vpxor (7*16)(%r12), RL4, RL4;
14033+ vpxor (0*16)(%r14), RR1, RR1;
14034+ vpxor (1*16)(%r14), RL1, RL1;
14035+ vpxor (2*16)(%r14), RR2, RR2;
14036+ vpxor (3*16)(%r14), RL2, RL2;
14037+ vpxor (4*16)(%r14), RR3, RR3;
14038+ vpxor (5*16)(%r14), RL3, RL3;
14039+ vpxor (6*16)(%r14), RR4, RR4;
14040+ vpxor (7*16)(%r14), RL4, RL4;
14041 vmovdqu RR1, (0*16)(%r11);
14042 vmovdqu RL1, (1*16)(%r11);
14043 vmovdqu RR2, (2*16)(%r11);
14044@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
14045 vmovdqu RR4, (6*16)(%r11);
14046 vmovdqu RL4, (7*16)(%r11);
14047
14048- popq %r12;
14049+ popq %r14;
14050
14051+ pax_force_retaddr
14052 ret;
14053 ENDPROC(cast5_ctr_16way)
14054diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14055index e3531f8..e123f35 100644
14056--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14057+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14058@@ -24,6 +24,7 @@
14059 */
14060
14061 #include <linux/linkage.h>
14062+#include <asm/alternative-asm.h>
14063 #include "glue_helper-asm-avx.S"
14064
14065 .file "cast6-avx-x86_64-asm_64.S"
14066@@ -295,6 +296,7 @@ __cast6_enc_blk8:
14067 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14068 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14069
14070+ pax_force_retaddr
14071 ret;
14072 ENDPROC(__cast6_enc_blk8)
14073
14074@@ -340,6 +342,7 @@ __cast6_dec_blk8:
14075 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14076 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14077
14078+ pax_force_retaddr
14079 ret;
14080 ENDPROC(__cast6_dec_blk8)
14081
14082@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14083
14084 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14085
14086+ pax_force_retaddr
14087 ret;
14088 ENDPROC(cast6_ecb_enc_8way)
14089
14090@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14091
14092 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14093
14094+ pax_force_retaddr
14095 ret;
14096 ENDPROC(cast6_ecb_dec_8way)
14097
14098@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14099 * %rdx: src
14100 */
14101
14102- pushq %r12;
14103+ pushq %r14;
14104
14105 movq %rsi, %r11;
14106- movq %rdx, %r12;
14107+ movq %rdx, %r14;
14108
14109 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14110
14111 call __cast6_dec_blk8;
14112
14113- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14114+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14115
14116- popq %r12;
14117+ popq %r14;
14118
14119+ pax_force_retaddr
14120 ret;
14121 ENDPROC(cast6_cbc_dec_8way)
14122
14123@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14124 * %rcx: iv (little endian, 128bit)
14125 */
14126
14127- pushq %r12;
14128+ pushq %r14;
14129
14130 movq %rsi, %r11;
14131- movq %rdx, %r12;
14132+ movq %rdx, %r14;
14133
14134 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14135 RD2, RX, RKR, RKM);
14136
14137 call __cast6_enc_blk8;
14138
14139- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14140+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14141
14142- popq %r12;
14143+ popq %r14;
14144
14145+ pax_force_retaddr
14146 ret;
14147 ENDPROC(cast6_ctr_8way)
14148
14149@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14150 /* dst <= regs xor IVs(in dst) */
14151 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14152
14153+ pax_force_retaddr
14154 ret;
14155 ENDPROC(cast6_xts_enc_8way)
14156
14157@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14158 /* dst <= regs xor IVs(in dst) */
14159 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14160
14161+ pax_force_retaddr
14162 ret;
14163 ENDPROC(cast6_xts_dec_8way)
14164diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14165index dbc4339..de6e120 100644
14166--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14167+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14168@@ -45,6 +45,7 @@
14169
14170 #include <asm/inst.h>
14171 #include <linux/linkage.h>
14172+#include <asm/alternative-asm.h>
14173
14174 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14175
14176@@ -312,6 +313,7 @@ do_return:
14177 popq %rsi
14178 popq %rdi
14179 popq %rbx
14180+ pax_force_retaddr
14181 ret
14182
14183 ################################################################
14184diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14185index 5d1e007..098cb4f 100644
14186--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14187+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14188@@ -18,6 +18,7 @@
14189
14190 #include <linux/linkage.h>
14191 #include <asm/inst.h>
14192+#include <asm/alternative-asm.h>
14193
14194 .data
14195
14196@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14197 psrlq $1, T2
14198 pxor T2, T1
14199 pxor T1, DATA
14200+ pax_force_retaddr
14201 ret
14202 ENDPROC(__clmul_gf128mul_ble)
14203
14204@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14205 call __clmul_gf128mul_ble
14206 PSHUFB_XMM BSWAP DATA
14207 movups DATA, (%rdi)
14208+ pax_force_retaddr
14209 ret
14210 ENDPROC(clmul_ghash_mul)
14211
14212@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14213 PSHUFB_XMM BSWAP DATA
14214 movups DATA, (%rdi)
14215 .Lupdate_just_ret:
14216+ pax_force_retaddr
14217 ret
14218 ENDPROC(clmul_ghash_update)
14219diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14220index 9279e0b..c4b3d2c 100644
14221--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14222+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14223@@ -1,4 +1,5 @@
14224 #include <linux/linkage.h>
14225+#include <asm/alternative-asm.h>
14226
14227 # enter salsa20_encrypt_bytes
14228 ENTRY(salsa20_encrypt_bytes)
14229@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14230 add %r11,%rsp
14231 mov %rdi,%rax
14232 mov %rsi,%rdx
14233+ pax_force_retaddr
14234 ret
14235 # bytesatleast65:
14236 ._bytesatleast65:
14237@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14238 add %r11,%rsp
14239 mov %rdi,%rax
14240 mov %rsi,%rdx
14241+ pax_force_retaddr
14242 ret
14243 ENDPROC(salsa20_keysetup)
14244
14245@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14246 add %r11,%rsp
14247 mov %rdi,%rax
14248 mov %rsi,%rdx
14249+ pax_force_retaddr
14250 ret
14251 ENDPROC(salsa20_ivsetup)
14252diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14253index 2f202f4..d9164d6 100644
14254--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14255+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14256@@ -24,6 +24,7 @@
14257 */
14258
14259 #include <linux/linkage.h>
14260+#include <asm/alternative-asm.h>
14261 #include "glue_helper-asm-avx.S"
14262
14263 .file "serpent-avx-x86_64-asm_64.S"
14264@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14265 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14266 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14267
14268+ pax_force_retaddr
14269 ret;
14270 ENDPROC(__serpent_enc_blk8_avx)
14271
14272@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14273 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14274 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14275
14276+ pax_force_retaddr
14277 ret;
14278 ENDPROC(__serpent_dec_blk8_avx)
14279
14280@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14281
14282 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14283
14284+ pax_force_retaddr
14285 ret;
14286 ENDPROC(serpent_ecb_enc_8way_avx)
14287
14288@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14289
14290 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14291
14292+ pax_force_retaddr
14293 ret;
14294 ENDPROC(serpent_ecb_dec_8way_avx)
14295
14296@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14297
14298 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14299
14300+ pax_force_retaddr
14301 ret;
14302 ENDPROC(serpent_cbc_dec_8way_avx)
14303
14304@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14305
14306 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14307
14308+ pax_force_retaddr
14309 ret;
14310 ENDPROC(serpent_ctr_8way_avx)
14311
14312@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14313 /* dst <= regs xor IVs(in dst) */
14314 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14315
14316+ pax_force_retaddr
14317 ret;
14318 ENDPROC(serpent_xts_enc_8way_avx)
14319
14320@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14321 /* dst <= regs xor IVs(in dst) */
14322 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14323
14324+ pax_force_retaddr
14325 ret;
14326 ENDPROC(serpent_xts_dec_8way_avx)
14327diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14328index b222085..abd483c 100644
14329--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14330+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14331@@ -15,6 +15,7 @@
14332 */
14333
14334 #include <linux/linkage.h>
14335+#include <asm/alternative-asm.h>
14336 #include "glue_helper-asm-avx2.S"
14337
14338 .file "serpent-avx2-asm_64.S"
14339@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14340 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14341 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14342
14343+ pax_force_retaddr
14344 ret;
14345 ENDPROC(__serpent_enc_blk16)
14346
14347@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14348 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14349 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14350
14351+ pax_force_retaddr
14352 ret;
14353 ENDPROC(__serpent_dec_blk16)
14354
14355@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14356
14357 vzeroupper;
14358
14359+ pax_force_retaddr
14360 ret;
14361 ENDPROC(serpent_ecb_enc_16way)
14362
14363@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14364
14365 vzeroupper;
14366
14367+ pax_force_retaddr
14368 ret;
14369 ENDPROC(serpent_ecb_dec_16way)
14370
14371@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14372
14373 vzeroupper;
14374
14375+ pax_force_retaddr
14376 ret;
14377 ENDPROC(serpent_cbc_dec_16way)
14378
14379@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14380
14381 vzeroupper;
14382
14383+ pax_force_retaddr
14384 ret;
14385 ENDPROC(serpent_ctr_16way)
14386
14387@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14388
14389 vzeroupper;
14390
14391+ pax_force_retaddr
14392 ret;
14393 ENDPROC(serpent_xts_enc_16way)
14394
14395@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14396
14397 vzeroupper;
14398
14399+ pax_force_retaddr
14400 ret;
14401 ENDPROC(serpent_xts_dec_16way)
14402diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14403index acc066c..1559cc4 100644
14404--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14405+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14406@@ -25,6 +25,7 @@
14407 */
14408
14409 #include <linux/linkage.h>
14410+#include <asm/alternative-asm.h>
14411
14412 .file "serpent-sse2-x86_64-asm_64.S"
14413 .text
14414@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14415 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14416 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14417
14418+ pax_force_retaddr
14419 ret;
14420
14421 .L__enc_xor8:
14422 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14423 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14424
14425+ pax_force_retaddr
14426 ret;
14427 ENDPROC(__serpent_enc_blk_8way)
14428
14429@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14430 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14431 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14432
14433+ pax_force_retaddr
14434 ret;
14435 ENDPROC(serpent_dec_blk_8way)
14436diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14437index a410950..9dfe7ad 100644
14438--- a/arch/x86/crypto/sha1_ssse3_asm.S
14439+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14440@@ -29,6 +29,7 @@
14441 */
14442
14443 #include <linux/linkage.h>
14444+#include <asm/alternative-asm.h>
14445
14446 #define CTX %rdi // arg1
14447 #define BUF %rsi // arg2
14448@@ -75,9 +76,9 @@
14449
14450 push %rbx
14451 push %rbp
14452- push %r12
14453+ push %r14
14454
14455- mov %rsp, %r12
14456+ mov %rsp, %r14
14457 sub $64, %rsp # allocate workspace
14458 and $~15, %rsp # align stack
14459
14460@@ -99,11 +100,12 @@
14461 xor %rax, %rax
14462 rep stosq
14463
14464- mov %r12, %rsp # deallocate workspace
14465+ mov %r14, %rsp # deallocate workspace
14466
14467- pop %r12
14468+ pop %r14
14469 pop %rbp
14470 pop %rbx
14471+ pax_force_retaddr
14472 ret
14473
14474 ENDPROC(\name)
14475diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14476index 642f156..51a513c 100644
14477--- a/arch/x86/crypto/sha256-avx-asm.S
14478+++ b/arch/x86/crypto/sha256-avx-asm.S
14479@@ -49,6 +49,7 @@
14480
14481 #ifdef CONFIG_AS_AVX
14482 #include <linux/linkage.h>
14483+#include <asm/alternative-asm.h>
14484
14485 ## assume buffers not aligned
14486 #define VMOVDQ vmovdqu
14487@@ -460,6 +461,7 @@ done_hash:
14488 popq %r13
14489 popq %rbp
14490 popq %rbx
14491+ pax_force_retaddr
14492 ret
14493 ENDPROC(sha256_transform_avx)
14494
14495diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14496index 9e86944..3795e6a 100644
14497--- a/arch/x86/crypto/sha256-avx2-asm.S
14498+++ b/arch/x86/crypto/sha256-avx2-asm.S
14499@@ -50,6 +50,7 @@
14500
14501 #ifdef CONFIG_AS_AVX2
14502 #include <linux/linkage.h>
14503+#include <asm/alternative-asm.h>
14504
14505 ## assume buffers not aligned
14506 #define VMOVDQ vmovdqu
14507@@ -720,6 +721,7 @@ done_hash:
14508 popq %r12
14509 popq %rbp
14510 popq %rbx
14511+ pax_force_retaddr
14512 ret
14513 ENDPROC(sha256_transform_rorx)
14514
14515diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14516index f833b74..8c62a9e 100644
14517--- a/arch/x86/crypto/sha256-ssse3-asm.S
14518+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14519@@ -47,6 +47,7 @@
14520 ########################################################################
14521
14522 #include <linux/linkage.h>
14523+#include <asm/alternative-asm.h>
14524
14525 ## assume buffers not aligned
14526 #define MOVDQ movdqu
14527@@ -471,6 +472,7 @@ done_hash:
14528 popq %rbp
14529 popq %rbx
14530
14531+ pax_force_retaddr
14532 ret
14533 ENDPROC(sha256_transform_ssse3)
14534
14535diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14536index 974dde9..a823ff9 100644
14537--- a/arch/x86/crypto/sha512-avx-asm.S
14538+++ b/arch/x86/crypto/sha512-avx-asm.S
14539@@ -49,6 +49,7 @@
14540
14541 #ifdef CONFIG_AS_AVX
14542 #include <linux/linkage.h>
14543+#include <asm/alternative-asm.h>
14544
14545 .text
14546
14547@@ -364,6 +365,7 @@ updateblock:
14548 mov frame_RSPSAVE(%rsp), %rsp
14549
14550 nowork:
14551+ pax_force_retaddr
14552 ret
14553 ENDPROC(sha512_transform_avx)
14554
14555diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14556index 568b961..ed20c37 100644
14557--- a/arch/x86/crypto/sha512-avx2-asm.S
14558+++ b/arch/x86/crypto/sha512-avx2-asm.S
14559@@ -51,6 +51,7 @@
14560
14561 #ifdef CONFIG_AS_AVX2
14562 #include <linux/linkage.h>
14563+#include <asm/alternative-asm.h>
14564
14565 .text
14566
14567@@ -678,6 +679,7 @@ done_hash:
14568
14569 # Restore Stack Pointer
14570 mov frame_RSPSAVE(%rsp), %rsp
14571+ pax_force_retaddr
14572 ret
14573 ENDPROC(sha512_transform_rorx)
14574
14575diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14576index fb56855..6edd768 100644
14577--- a/arch/x86/crypto/sha512-ssse3-asm.S
14578+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14579@@ -48,6 +48,7 @@
14580 ########################################################################
14581
14582 #include <linux/linkage.h>
14583+#include <asm/alternative-asm.h>
14584
14585 .text
14586
14587@@ -363,6 +364,7 @@ updateblock:
14588 mov frame_RSPSAVE(%rsp), %rsp
14589
14590 nowork:
14591+ pax_force_retaddr
14592 ret
14593 ENDPROC(sha512_transform_ssse3)
14594
14595diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14596index 0505813..b067311 100644
14597--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14598+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14599@@ -24,6 +24,7 @@
14600 */
14601
14602 #include <linux/linkage.h>
14603+#include <asm/alternative-asm.h>
14604 #include "glue_helper-asm-avx.S"
14605
14606 .file "twofish-avx-x86_64-asm_64.S"
14607@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14608 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14609 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14610
14611+ pax_force_retaddr
14612 ret;
14613 ENDPROC(__twofish_enc_blk8)
14614
14615@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14616 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14617 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14618
14619+ pax_force_retaddr
14620 ret;
14621 ENDPROC(__twofish_dec_blk8)
14622
14623@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14624
14625 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14626
14627+ pax_force_retaddr
14628 ret;
14629 ENDPROC(twofish_ecb_enc_8way)
14630
14631@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14632
14633 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14634
14635+ pax_force_retaddr
14636 ret;
14637 ENDPROC(twofish_ecb_dec_8way)
14638
14639@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14640 * %rdx: src
14641 */
14642
14643- pushq %r12;
14644+ pushq %r14;
14645
14646 movq %rsi, %r11;
14647- movq %rdx, %r12;
14648+ movq %rdx, %r14;
14649
14650 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14651
14652 call __twofish_dec_blk8;
14653
14654- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14655+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14656
14657- popq %r12;
14658+ popq %r14;
14659
14660+ pax_force_retaddr
14661 ret;
14662 ENDPROC(twofish_cbc_dec_8way)
14663
14664@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14665 * %rcx: iv (little endian, 128bit)
14666 */
14667
14668- pushq %r12;
14669+ pushq %r14;
14670
14671 movq %rsi, %r11;
14672- movq %rdx, %r12;
14673+ movq %rdx, %r14;
14674
14675 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14676 RD2, RX0, RX1, RY0);
14677
14678 call __twofish_enc_blk8;
14679
14680- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14681+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14682
14683- popq %r12;
14684+ popq %r14;
14685
14686+ pax_force_retaddr
14687 ret;
14688 ENDPROC(twofish_ctr_8way)
14689
14690@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14691 /* dst <= regs xor IVs(in dst) */
14692 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14693
14694+ pax_force_retaddr
14695 ret;
14696 ENDPROC(twofish_xts_enc_8way)
14697
14698@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14699 /* dst <= regs xor IVs(in dst) */
14700 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14701
14702+ pax_force_retaddr
14703 ret;
14704 ENDPROC(twofish_xts_dec_8way)
14705diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14706index 1c3b7ce..02f578d 100644
14707--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14708+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14709@@ -21,6 +21,7 @@
14710 */
14711
14712 #include <linux/linkage.h>
14713+#include <asm/alternative-asm.h>
14714
14715 .file "twofish-x86_64-asm-3way.S"
14716 .text
14717@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14718 popq %r13;
14719 popq %r14;
14720 popq %r15;
14721+ pax_force_retaddr
14722 ret;
14723
14724 .L__enc_xor3:
14725@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14726 popq %r13;
14727 popq %r14;
14728 popq %r15;
14729+ pax_force_retaddr
14730 ret;
14731 ENDPROC(__twofish_enc_blk_3way)
14732
14733@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14734 popq %r13;
14735 popq %r14;
14736 popq %r15;
14737+ pax_force_retaddr
14738 ret;
14739 ENDPROC(twofish_dec_blk_3way)
14740diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14741index a039d21..524b8b2 100644
14742--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14743+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14744@@ -22,6 +22,7 @@
14745
14746 #include <linux/linkage.h>
14747 #include <asm/asm-offsets.h>
14748+#include <asm/alternative-asm.h>
14749
14750 #define a_offset 0
14751 #define b_offset 4
14752@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14753
14754 popq R1
14755 movq $1,%rax
14756+ pax_force_retaddr
14757 ret
14758 ENDPROC(twofish_enc_blk)
14759
14760@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14761
14762 popq R1
14763 movq $1,%rax
14764+ pax_force_retaddr
14765 ret
14766 ENDPROC(twofish_dec_blk)
14767diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14768index d21ff89..6da8e6e 100644
14769--- a/arch/x86/ia32/ia32_aout.c
14770+++ b/arch/x86/ia32/ia32_aout.c
14771@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14772 unsigned long dump_start, dump_size;
14773 struct user32 dump;
14774
14775+ memset(&dump, 0, sizeof(dump));
14776+
14777 fs = get_fs();
14778 set_fs(KERNEL_DS);
14779 has_dumped = 1;
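
The ia32_aout.c hunk above zeroes the whole struct user32 before the dump code fills it in. The structure lives on the kernel stack and is subsequently written out to the core file, so any field the dump path never sets, plus all padding bytes, would otherwise leak stale kernel stack contents to userland. The same defensive pattern in miniature; struct report and fill_report are made-up names.

	/* Zero-fill before partial initialization of a struct that
	 * leaves the kernel (core dump, copy_to_user, etc.). */
	#include <string.h>

	struct report {
		char tag;	/* 7 padding bytes follow on x86-64 */
		long value;
	};

	static void fill_report(struct report *r, long v)
	{
		/* the member assignments initialize the fields but never
		 * the padding; without this memset the padding would carry
		 * whatever was on the stack beforehand */
		memset(r, 0, sizeof(*r));
		r->tag = 'R';
		r->value = v;
	}
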
14780diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14781index f9e181a..b0df8b3 100644
14782--- a/arch/x86/ia32/ia32_signal.c
14783+++ b/arch/x86/ia32/ia32_signal.c
14784@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14785 if (__get_user(set.sig[0], &frame->sc.oldmask)
14786 || (_COMPAT_NSIG_WORDS > 1
14787 && __copy_from_user((((char *) &set.sig) + 4),
14788- &frame->extramask,
14789+ frame->extramask,
14790 sizeof(frame->extramask))))
14791 goto badframe;
14792
14793@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14794 sp -= frame_size;
14795 /* Align the stack pointer according to the i386 ABI,
14796 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14797- sp = ((sp + 4) & -16ul) - 4;
14798+ sp = ((sp - 12) & -16ul) - 4;
14799 return (void __user *) sp;
14800 }
14801
14802@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14803 restorer = current->mm->context.vdso +
14804 selected_vdso32->sym___kernel_sigreturn;
14805 else
14806- restorer = &frame->retcode;
14807+ restorer = frame->retcode;
14808 }
14809
14810 put_user_try {
14811@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14812 * These are actually not used anymore, but left because some
14813 * gdb versions depend on them as a marker.
14814 */
14815- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14816+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14817 } put_user_catch(err);
14818
14819 if (err)
14820@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14821 0xb8,
14822 __NR_ia32_rt_sigreturn,
14823 0x80cd,
14824- 0,
14825+ 0
14826 };
14827
14828 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14829@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14830
14831 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14832 restorer = ksig->ka.sa.sa_restorer;
14833- else
14834+ else if (current->mm->context.vdso)
14835+ /* Return stub is in 32bit vsyscall page */
14836 restorer = current->mm->context.vdso +
14837 selected_vdso32->sym___kernel_rt_sigreturn;
14838+ else
14839+ restorer = frame->retcode;
14840 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14841
14842 /*
14843 * Not actually used anymore, but left because some gdb
14844 * versions need it.
14845 */
14846- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14847+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14848 } put_user_catch(err);
14849
14850 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
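
Three independent fixes sit in the ia32_signal.c hunks above: extramask is an array, so passing frame->extramask instead of &frame->extramask gives __copy_from_user the conventional pointer type; the rt frame now falls back to the in-frame retcode stub when no vdso is mapped, which this patch makes possible; and get_sigframe() changes its rounding expression. For the last one, the i386 ABI requires ((sp + 4) & 15) == 0 at function entry; both expressions land in that residue class, but the old one could leave sp untouched while the new one always rounds strictly downward by 16 to 31 bytes, giving the frame guaranteed slack below the previously computed top. The arithmetic is easy to machine-check; check_alignment is an illustrative harness.

	/* Compare the two rounding formulas (assumes sp is large enough
	 * that the subtractions do not wrap). */
	#include <assert.h>

	static void check_alignment(unsigned long sp)
	{
		unsigned long old_sp = ((sp + 4) & -16ul) - 4;
		unsigned long new_sp = ((sp - 12) & -16ul) - 4;

		assert(((old_sp + 4) & 15) == 0);	/* both meet the ABI rule */
		assert(((new_sp + 4) & 15) == 0);
		assert(sp - 15 <= old_sp && old_sp <= sp);	/* may stay put */
		assert(sp - 31 <= new_sp && new_sp <= sp - 16);	/* always descends */
	}
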
14851diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14852index 4299eb0..fefe70e 100644
14853--- a/arch/x86/ia32/ia32entry.S
14854+++ b/arch/x86/ia32/ia32entry.S
14855@@ -15,8 +15,10 @@
14856 #include <asm/irqflags.h>
14857 #include <asm/asm.h>
14858 #include <asm/smap.h>
14859+#include <asm/pgtable.h>
14860 #include <linux/linkage.h>
14861 #include <linux/err.h>
14862+#include <asm/alternative-asm.h>
14863
14864 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14865 #include <linux/elf-em.h>
14866@@ -62,12 +64,12 @@
14867 */
14868 .macro LOAD_ARGS32 offset, _r9=0
14869 .if \_r9
14870- movl \offset+16(%rsp),%r9d
14871+ movl \offset+R9(%rsp),%r9d
14872 .endif
14873- movl \offset+40(%rsp),%ecx
14874- movl \offset+48(%rsp),%edx
14875- movl \offset+56(%rsp),%esi
14876- movl \offset+64(%rsp),%edi
14877+ movl \offset+RCX(%rsp),%ecx
14878+ movl \offset+RDX(%rsp),%edx
14879+ movl \offset+RSI(%rsp),%esi
14880+ movl \offset+RDI(%rsp),%edi
14881 movl %eax,%eax /* zero extension */
14882 .endm
14883
14884@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14885 ENDPROC(native_irq_enable_sysexit)
14886 #endif
14887
14888+ .macro pax_enter_kernel_user
14889+ pax_set_fptr_mask
14890+#ifdef CONFIG_PAX_MEMORY_UDEREF
14891+ call pax_enter_kernel_user
14892+#endif
14893+ .endm
14894+
14895+ .macro pax_exit_kernel_user
14896+#ifdef CONFIG_PAX_MEMORY_UDEREF
14897+ call pax_exit_kernel_user
14898+#endif
14899+#ifdef CONFIG_PAX_RANDKSTACK
14900+ pushq %rax
14901+ pushq %r11
14902+ call pax_randomize_kstack
14903+ popq %r11
14904+ popq %rax
14905+#endif
14906+ .endm
14907+
14908+ .macro pax_erase_kstack
14909+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14910+ call pax_erase_kstack
14911+#endif
14912+ .endm
14913+
14914 /*
14915 * 32bit SYSENTER instruction entry.
14916 *
14917@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14918 CFI_REGISTER rsp,rbp
14919 SWAPGS_UNSAFE_STACK
14920 movq PER_CPU_VAR(kernel_stack), %rsp
14921- addq $(KERNEL_STACK_OFFSET),%rsp
14922- /*
14923- * No need to follow this irqs on/off section: the syscall
14924- * disabled irqs, here we enable it straight after entry:
14925- */
14926- ENABLE_INTERRUPTS(CLBR_NONE)
14927 movl %ebp,%ebp /* zero extension */
14928 pushq_cfi $__USER32_DS
14929 /*CFI_REL_OFFSET ss,0*/
14930@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
14931 CFI_REL_OFFSET rsp,0
14932 pushfq_cfi
14933 /*CFI_REL_OFFSET rflags,0*/
14934- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14935- CFI_REGISTER rip,r10
14936+ orl $X86_EFLAGS_IF,(%rsp)
14937+ GET_THREAD_INFO(%r11)
14938+ movl TI_sysenter_return(%r11), %r11d
14939+ CFI_REGISTER rip,r11
14940 pushq_cfi $__USER32_CS
14941 /*CFI_REL_OFFSET cs,0*/
14942 movl %eax, %eax
14943- pushq_cfi %r10
14944+ pushq_cfi %r11
14945 CFI_REL_OFFSET rip,0
14946 pushq_cfi %rax
14947 cld
14948 SAVE_ARGS 0,1,0
14949+ pax_enter_kernel_user
14950+
14951+#ifdef CONFIG_PAX_RANDKSTACK
14952+ pax_erase_kstack
14953+#endif
14954+
14955+ /*
14956+ * No need to follow this irqs on/off section: the syscall
14957+ * disabled irqs, here we enable it straight after entry:
14958+ */
14959+ ENABLE_INTERRUPTS(CLBR_NONE)
14960 /* no need to do an access_ok check here because rbp has been
14961 32bit zero extended */
14962+
14963+#ifdef CONFIG_PAX_MEMORY_UDEREF
14964+ addq pax_user_shadow_base,%rbp
14965+ ASM_PAX_OPEN_USERLAND
14966+#endif
14967+
14968 ASM_STAC
14969 1: movl (%rbp),%ebp
14970 _ASM_EXTABLE(1b,ia32_badarg)
14971 ASM_CLAC
14972- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14973- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14974+
14975+#ifdef CONFIG_PAX_MEMORY_UDEREF
14976+ ASM_PAX_CLOSE_USERLAND
14977+#endif
14978+
14979+ GET_THREAD_INFO(%r11)
14980+ orl $TS_COMPAT,TI_status(%r11)
14981+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14982 CFI_REMEMBER_STATE
14983 jnz sysenter_tracesys
14984 cmpq $(IA32_NR_syscalls-1),%rax
14985@@ -162,15 +209,18 @@ sysenter_do_call:
14986 sysenter_dispatch:
14987 call *ia32_sys_call_table(,%rax,8)
14988 movq %rax,RAX-ARGOFFSET(%rsp)
14989+ GET_THREAD_INFO(%r11)
14990 DISABLE_INTERRUPTS(CLBR_NONE)
14991 TRACE_IRQS_OFF
14992- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14993+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14994 jnz sysexit_audit
14995 sysexit_from_sys_call:
14996- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14997+ pax_exit_kernel_user
14998+ pax_erase_kstack
14999+ andl $~TS_COMPAT,TI_status(%r11)
15000 /* clear IF, that popfq doesn't enable interrupts early */
15001- andl $~0x200,EFLAGS-R11(%rsp)
15002- movl RIP-R11(%rsp),%edx /* User %eip */
15003+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
15004+ movl RIP(%rsp),%edx /* User %eip */
15005 CFI_REGISTER rip,rdx
15006 RESTORE_ARGS 0,24,0,0,0,0
15007 xorq %r8,%r8
15008@@ -193,6 +243,9 @@ sysexit_from_sys_call:
15009 movl %eax,%esi /* 2nd arg: syscall number */
15010 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
15011 call __audit_syscall_entry
15012+
15013+ pax_erase_kstack
15014+
15015 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
15016 cmpq $(IA32_NR_syscalls-1),%rax
15017 ja ia32_badsys
15018@@ -204,7 +257,7 @@ sysexit_from_sys_call:
15019 .endm
15020
15021 .macro auditsys_exit exit
15022- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15023+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15024 jnz ia32_ret_from_sys_call
15025 TRACE_IRQS_ON
15026 ENABLE_INTERRUPTS(CLBR_NONE)
15027@@ -215,11 +268,12 @@ sysexit_from_sys_call:
15028 1: setbe %al /* 1 if error, 0 if not */
15029 movzbl %al,%edi /* zero-extend that into %edi */
15030 call __audit_syscall_exit
15031+ GET_THREAD_INFO(%r11)
15032 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
15033 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
15034 DISABLE_INTERRUPTS(CLBR_NONE)
15035 TRACE_IRQS_OFF
15036- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15037+ testl %edi,TI_flags(%r11)
15038 jz \exit
15039 CLEAR_RREGS -ARGOFFSET
15040 jmp int_with_check
15041@@ -237,7 +291,7 @@ sysexit_audit:
15042
15043 sysenter_tracesys:
15044 #ifdef CONFIG_AUDITSYSCALL
15045- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15046+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15047 jz sysenter_auditsys
15048 #endif
15049 SAVE_REST
15050@@ -249,6 +303,9 @@ sysenter_tracesys:
15051 RESTORE_REST
15052 cmpq $(IA32_NR_syscalls-1),%rax
15053 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
15054+
15055+ pax_erase_kstack
15056+
15057 jmp sysenter_do_call
15058 CFI_ENDPROC
15059 ENDPROC(ia32_sysenter_target)
15060@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
15061 ENTRY(ia32_cstar_target)
15062 CFI_STARTPROC32 simple
15063 CFI_SIGNAL_FRAME
15064- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15065+ CFI_DEF_CFA rsp,0
15066 CFI_REGISTER rip,rcx
15067 /*CFI_REGISTER rflags,r11*/
15068 SWAPGS_UNSAFE_STACK
15069 movl %esp,%r8d
15070 CFI_REGISTER rsp,r8
15071 movq PER_CPU_VAR(kernel_stack),%rsp
15072+ SAVE_ARGS 8*6,0,0
15073+ pax_enter_kernel_user
15074+
15075+#ifdef CONFIG_PAX_RANDKSTACK
15076+ pax_erase_kstack
15077+#endif
15078+
15079 /*
15080 * No need to follow this irqs on/off section: the syscall
15081 * disabled irqs and here we enable it straight after entry:
15082 */
15083 ENABLE_INTERRUPTS(CLBR_NONE)
15084- SAVE_ARGS 8,0,0
15085 movl %eax,%eax /* zero extension */
15086 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15087 movq %rcx,RIP-ARGOFFSET(%rsp)
15088@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
15089 /* no need to do an access_ok check here because r8 has been
15090 32bit zero extended */
15091 /* hardware stack frame is complete now */
15092+
15093+#ifdef CONFIG_PAX_MEMORY_UDEREF
15094+ ASM_PAX_OPEN_USERLAND
15095+ movq pax_user_shadow_base,%r8
15096+ addq RSP-ARGOFFSET(%rsp),%r8
15097+#endif
15098+
15099 ASM_STAC
15100 1: movl (%r8),%r9d
15101 _ASM_EXTABLE(1b,ia32_badarg)
15102 ASM_CLAC
15103- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15104- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15105+
15106+#ifdef CONFIG_PAX_MEMORY_UDEREF
15107+ ASM_PAX_CLOSE_USERLAND
15108+#endif
15109+
15110+ GET_THREAD_INFO(%r11)
15111+ orl $TS_COMPAT,TI_status(%r11)
15112+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15113 CFI_REMEMBER_STATE
15114 jnz cstar_tracesys
15115 cmpq $IA32_NR_syscalls-1,%rax
15116@@ -319,13 +395,16 @@ cstar_do_call:
15117 cstar_dispatch:
15118 call *ia32_sys_call_table(,%rax,8)
15119 movq %rax,RAX-ARGOFFSET(%rsp)
15120+ GET_THREAD_INFO(%r11)
15121 DISABLE_INTERRUPTS(CLBR_NONE)
15122 TRACE_IRQS_OFF
15123- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15124+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15125 jnz sysretl_audit
15126 sysretl_from_sys_call:
15127- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15128- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15129+ pax_exit_kernel_user
15130+ pax_erase_kstack
15131+ andl $~TS_COMPAT,TI_status(%r11)
15132+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15133 movl RIP-ARGOFFSET(%rsp),%ecx
15134 CFI_REGISTER rip,rcx
15135 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15136@@ -352,7 +431,7 @@ sysretl_audit:
15137
15138 cstar_tracesys:
15139 #ifdef CONFIG_AUDITSYSCALL
15140- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15141+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15142 jz cstar_auditsys
15143 #endif
15144 xchgl %r9d,%ebp
15145@@ -366,11 +445,19 @@ cstar_tracesys:
15146 xchgl %ebp,%r9d
15147 cmpq $(IA32_NR_syscalls-1),%rax
15148 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15149+
15150+ pax_erase_kstack
15151+
15152 jmp cstar_do_call
15153 END(ia32_cstar_target)
15154
15155 ia32_badarg:
15156 ASM_CLAC
15157+
15158+#ifdef CONFIG_PAX_MEMORY_UDEREF
15159+ ASM_PAX_CLOSE_USERLAND
15160+#endif
15161+
15162 movq $-EFAULT,%rax
15163 jmp ia32_sysret
15164 CFI_ENDPROC
15165@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
15166 CFI_REL_OFFSET rip,RIP-RIP
15167 PARAVIRT_ADJUST_EXCEPTION_FRAME
15168 SWAPGS
15169- /*
15170- * No need to follow this irqs on/off section: the syscall
15171- * disabled irqs and here we enable it straight after entry:
15172- */
15173- ENABLE_INTERRUPTS(CLBR_NONE)
15174 movl %eax,%eax
15175 pushq_cfi %rax
15176 cld
15177 /* note the registers are not zero extended to the sf.
15178 this could be a problem. */
15179 SAVE_ARGS 0,1,0
15180- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15181- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15182+ pax_enter_kernel_user
15183+
15184+#ifdef CONFIG_PAX_RANDKSTACK
15185+ pax_erase_kstack
15186+#endif
15187+
15188+ /*
15189+ * No need to follow this irqs on/off section: the syscall
15190+ * disabled irqs and here we enable it straight after entry:
15191+ */
15192+ ENABLE_INTERRUPTS(CLBR_NONE)
15193+ GET_THREAD_INFO(%r11)
15194+ orl $TS_COMPAT,TI_status(%r11)
15195+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15196 jnz ia32_tracesys
15197 cmpq $(IA32_NR_syscalls-1),%rax
15198 ja ia32_badsys
15199@@ -442,6 +536,9 @@ ia32_tracesys:
15200 RESTORE_REST
15201 cmpq $(IA32_NR_syscalls-1),%rax
15202 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15203+
15204+ pax_erase_kstack
15205+
15206 jmp ia32_do_call
15207 END(ia32_syscall)
15208
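
A note on the UDEREF hunks above: before dereferencing the userland pointer held in %rbp or %r8, the entry code rebases it by pax_user_shadow_base into the kernel-visible shadow mapping of userland, bracketed by ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND. A minimal userspace sketch of the rebasing idea (pax_user_shadow_base reduced to a plain variable, with an identity value assumed for the demo):

	#include <stdint.h>
	#include <stdio.h>

	static uintptr_t pax_user_shadow_base;	/* set once at boot in the real code */

	static inline uint32_t uderef_load32(const void *user_ptr)
	{
		/* touch userland only through its shadow alias */
		const uint32_t *shadow = (const uint32_t *)
			((uintptr_t)user_ptr + pax_user_shadow_base);
		return *shadow;
	}

	int main(void)
	{
		uint32_t val = 42;

		pax_user_shadow_base = 0;	/* identity mapping, demo only */
		printf("%u\n", uderef_load32(&val));
		return 0;
	}
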
15209diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15210index 8e0ceec..af13504 100644
15211--- a/arch/x86/ia32/sys_ia32.c
15212+++ b/arch/x86/ia32/sys_ia32.c
15213@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15214 */
15215 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15216 {
15217- typeof(ubuf->st_uid) uid = 0;
15218- typeof(ubuf->st_gid) gid = 0;
15219+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15220+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15221 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15222 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15223 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
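
The cp_stat64() change reads the member type from the struct definition via a null-pointer cast instead of through the __user-qualified ubuf pointer; member access under typeof is unevaluated, so no dereference can occur. A standalone sketch with a stand-in struct:

	#include <stdio.h>

	struct stat64_demo {		/* stand-in for the real struct stat64 */
		unsigned short st_uid;
		unsigned short st_gid;
	};

	int main(void)
	{
		/* the cast null pointer is never dereferenced; typeof only
		   extracts the member's type at compile time */
		typeof(((struct stat64_demo *)0)->st_uid) uid = 0;

		printf("%zu\n", sizeof(uid));
		return 0;
	}
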
15224diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15225index 372231c..51b537d 100644
15226--- a/arch/x86/include/asm/alternative-asm.h
15227+++ b/arch/x86/include/asm/alternative-asm.h
15228@@ -18,6 +18,45 @@
15229 .endm
15230 #endif
15231
15232+#ifdef KERNEXEC_PLUGIN
15233+ .macro pax_force_retaddr_bts rip=0
15234+ btsq $63,\rip(%rsp)
15235+ .endm
15236+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15237+ .macro pax_force_retaddr rip=0, reload=0
15238+ btsq $63,\rip(%rsp)
15239+ .endm
15240+ .macro pax_force_fptr ptr
15241+ btsq $63,\ptr
15242+ .endm
15243+ .macro pax_set_fptr_mask
15244+ .endm
15245+#endif
15246+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15247+ .macro pax_force_retaddr rip=0, reload=0
15248+ .if \reload
15249+ pax_set_fptr_mask
15250+ .endif
15251+ orq %r12,\rip(%rsp)
15252+ .endm
15253+ .macro pax_force_fptr ptr
15254+ orq %r12,\ptr
15255+ .endm
15256+ .macro pax_set_fptr_mask
15257+ movabs $0x8000000000000000,%r12
15258+ .endm
15259+#endif
15260+#else
15261+ .macro pax_force_retaddr rip=0, reload=0
15262+ .endm
15263+ .macro pax_force_fptr ptr
15264+ .endm
15265+ .macro pax_force_retaddr_bts rip=0
15266+ .endm
15267+ .macro pax_set_fptr_mask
15268+ .endm
15269+#endif
15270+
15271 .macro altinstruction_entry orig alt feature orig_len alt_len
15272 .long \orig - .
15273 .long \alt - .
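
The pax_force_retaddr/pax_force_fptr macros above sanitize saved return addresses and function pointers under the KERNEXEC plugin: the BTS method sets bit 63 in place, and the OR method folds in the same mask kept in %r12 by pax_set_fptr_mask, pushing any tampered userland address into the kernel/non-canonical half so a hijacked return faults instead of executing. A userspace sketch of the effect of setting that bit (addresses illustrative):

	#include <stdint.h>
	#include <stdio.h>

	static inline uint64_t pax_force_retaddr_demo(uint64_t retaddr)
	{
		/* btsq $63 and orq %r12 both amount to setting the top bit */
		return retaddr | (1ULL << 63);
	}

	int main(void)
	{
		uint64_t user_addr = 0x00007f0000001000ULL;	/* typical userland text */

		printf("%#llx -> %#llx\n",
		       (unsigned long long)user_addr,
		       (unsigned long long)pax_force_retaddr_demo(user_addr));
		return 0;
	}
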
15274diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15275index 0a3f9c9..c9d081d 100644
15276--- a/arch/x86/include/asm/alternative.h
15277+++ b/arch/x86/include/asm/alternative.h
15278@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15279 ".pushsection .discard,\"aw\",@progbits\n" \
15280 DISCARD_ENTRY(1) \
15281 ".popsection\n" \
15282- ".pushsection .altinstr_replacement, \"ax\"\n" \
15283+ ".pushsection .altinstr_replacement, \"a\"\n" \
15284 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15285 ".popsection"
15286
15287@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15288 DISCARD_ENTRY(1) \
15289 DISCARD_ENTRY(2) \
15290 ".popsection\n" \
15291- ".pushsection .altinstr_replacement, \"ax\"\n" \
15292+ ".pushsection .altinstr_replacement, \"a\"\n" \
15293 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15294 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15295 ".popsection"
15296diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15297index 19b0eba..12254cd 100644
15298--- a/arch/x86/include/asm/apic.h
15299+++ b/arch/x86/include/asm/apic.h
15300@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15301
15302 #ifdef CONFIG_X86_LOCAL_APIC
15303
15304-extern unsigned int apic_verbosity;
15305+extern int apic_verbosity;
15306 extern int local_apic_timer_c2_ok;
15307
15308 extern int disable_apic;
15309diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15310index 20370c6..a2eb9b0 100644
15311--- a/arch/x86/include/asm/apm.h
15312+++ b/arch/x86/include/asm/apm.h
15313@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15314 __asm__ __volatile__(APM_DO_ZERO_SEGS
15315 "pushl %%edi\n\t"
15316 "pushl %%ebp\n\t"
15317- "lcall *%%cs:apm_bios_entry\n\t"
15318+ "lcall *%%ss:apm_bios_entry\n\t"
15319 "setc %%al\n\t"
15320 "popl %%ebp\n\t"
15321 "popl %%edi\n\t"
15322@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15323 __asm__ __volatile__(APM_DO_ZERO_SEGS
15324 "pushl %%edi\n\t"
15325 "pushl %%ebp\n\t"
15326- "lcall *%%cs:apm_bios_entry\n\t"
15327+ "lcall *%%ss:apm_bios_entry\n\t"
15328 "setc %%bl\n\t"
15329 "popl %%ebp\n\t"
15330 "popl %%edi\n\t"
15331diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15332index 6dd1c7dd..2edd216 100644
15333--- a/arch/x86/include/asm/atomic.h
15334+++ b/arch/x86/include/asm/atomic.h
15335@@ -24,7 +24,18 @@
15336 */
15337 static inline int atomic_read(const atomic_t *v)
15338 {
15339- return (*(volatile int *)&(v)->counter);
15340+ return (*(volatile const int *)&(v)->counter);
15341+}
15342+
15343+/**
15344+ * atomic_read_unchecked - read atomic variable
15345+ * @v: pointer of type atomic_unchecked_t
15346+ *
15347+ * Atomically reads the value of @v.
15348+ */
15349+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15350+{
15351+ return (*(volatile const int *)&(v)->counter);
15352 }
15353
15354 /**
15355@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15356 }
15357
15358 /**
15359+ * atomic_set_unchecked - set atomic variable
15360+ * @v: pointer of type atomic_unchecked_t
15361+ * @i: required value
15362+ *
15363+ * Atomically sets the value of @v to @i.
15364+ */
15365+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15366+{
15367+ v->counter = i;
15368+}
15369+
15370+/**
15371 * atomic_add - add integer to atomic variable
15372 * @i: integer value to add
15373 * @v: pointer of type atomic_t
15374@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15375 */
15376 static inline void atomic_add(int i, atomic_t *v)
15377 {
15378- asm volatile(LOCK_PREFIX "addl %1,%0"
15379+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15380+
15381+#ifdef CONFIG_PAX_REFCOUNT
15382+ "jno 0f\n"
15383+ LOCK_PREFIX "subl %1,%0\n"
15384+ "int $4\n0:\n"
15385+ _ASM_EXTABLE(0b, 0b)
15386+#endif
15387+
15388+ : "+m" (v->counter)
15389+ : "ir" (i));
15390+}
15391+
15392+/**
15393+ * atomic_add_unchecked - add integer to atomic variable
15394+ * @i: integer value to add
15395+ * @v: pointer of type atomic_unchecked_t
15396+ *
15397+ * Atomically adds @i to @v.
15398+ */
15399+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15400+{
15401+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15402 : "+m" (v->counter)
15403 : "ir" (i));
15404 }
15405@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15406 */
15407 static inline void atomic_sub(int i, atomic_t *v)
15408 {
15409- asm volatile(LOCK_PREFIX "subl %1,%0"
15410+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15411+
15412+#ifdef CONFIG_PAX_REFCOUNT
15413+ "jno 0f\n"
15414+ LOCK_PREFIX "addl %1,%0\n"
15415+ "int $4\n0:\n"
15416+ _ASM_EXTABLE(0b, 0b)
15417+#endif
15418+
15419+ : "+m" (v->counter)
15420+ : "ir" (i));
15421+}
15422+
15423+/**
15424+ * atomic_sub_unchecked - subtract integer from atomic variable
15425+ * @i: integer value to subtract
15426+ * @v: pointer of type atomic_unchecked_t
15427+ *
15428+ * Atomically subtracts @i from @v.
15429+ */
15430+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15431+{
15432+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15433 : "+m" (v->counter)
15434 : "ir" (i));
15435 }
15436@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15437 */
15438 static inline int atomic_sub_and_test(int i, atomic_t *v)
15439 {
15440- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15441+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15442 }
15443
15444 /**
15445@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15446 */
15447 static inline void atomic_inc(atomic_t *v)
15448 {
15449- asm volatile(LOCK_PREFIX "incl %0"
15450+ asm volatile(LOCK_PREFIX "incl %0\n"
15451+
15452+#ifdef CONFIG_PAX_REFCOUNT
15453+ "jno 0f\n"
15454+ LOCK_PREFIX "decl %0\n"
15455+ "int $4\n0:\n"
15456+ _ASM_EXTABLE(0b, 0b)
15457+#endif
15458+
15459+ : "+m" (v->counter));
15460+}
15461+
15462+/**
15463+ * atomic_inc_unchecked - increment atomic variable
15464+ * @v: pointer of type atomic_unchecked_t
15465+ *
15466+ * Atomically increments @v by 1.
15467+ */
15468+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15469+{
15470+ asm volatile(LOCK_PREFIX "incl %0\n"
15471 : "+m" (v->counter));
15472 }
15473
15474@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15475 */
15476 static inline void atomic_dec(atomic_t *v)
15477 {
15478- asm volatile(LOCK_PREFIX "decl %0"
15479+ asm volatile(LOCK_PREFIX "decl %0\n"
15480+
15481+#ifdef CONFIG_PAX_REFCOUNT
15482+ "jno 0f\n"
15483+ LOCK_PREFIX "incl %0\n"
15484+ "int $4\n0:\n"
15485+ _ASM_EXTABLE(0b, 0b)
15486+#endif
15487+
15488+ : "+m" (v->counter));
15489+}
15490+
15491+/**
15492+ * atomic_dec_unchecked - decrement atomic variable
15493+ * @v: pointer of type atomic_unchecked_t
15494+ *
15495+ * Atomically decrements @v by 1.
15496+ */
15497+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15498+{
15499+ asm volatile(LOCK_PREFIX "decl %0\n"
15500 : "+m" (v->counter));
15501 }
15502
15503@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15504 */
15505 static inline int atomic_dec_and_test(atomic_t *v)
15506 {
15507- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15508+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15509 }
15510
15511 /**
15512@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15513 */
15514 static inline int atomic_inc_and_test(atomic_t *v)
15515 {
15516- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15517+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15518+}
15519+
15520+/**
15521+ * atomic_inc_and_test_unchecked - increment and test
15522+ * @v: pointer of type atomic_unchecked_t
15523+ *
15524+ * Atomically increments @v by 1
15525+ * and returns true if the result is zero, or false for all
15526+ * other cases.
15527+ */
15528+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15529+{
15530+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15531 }
15532
15533 /**
15534@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15535 */
15536 static inline int atomic_add_negative(int i, atomic_t *v)
15537 {
15538- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15539+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15540 }
15541
15542 /**
15543@@ -154,6 +274,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15544 */
15545 static inline int atomic_add_return(int i, atomic_t *v)
15546 {
15547+ return i + xadd_check_overflow(&v->counter, i);
15548+}
15549+
15550+/**
15551+ * atomic_add_return_unchecked - add integer and return
15552+ * @i: integer value to add
15553+ * @v: pointer of type atomic_unchecked_t
15554+ *
15555+ * Atomically adds @i to @v and returns @i + @v
15556+ */
15557+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15558+{
15559 return i + xadd(&v->counter, i);
15560 }
15561
15562@@ -170,9 +302,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
15563 }
15564
15565 #define atomic_inc_return(v) (atomic_add_return(1, v))
15566+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15567+{
15568+ return atomic_add_return_unchecked(1, v);
15569+}
15570 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15571
15572-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15573+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15574+{
15575+ return cmpxchg(&v->counter, old, new);
15576+}
15577+
15578+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15579 {
15580 return cmpxchg(&v->counter, old, new);
15581 }
15582@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15583 return xchg(&v->counter, new);
15584 }
15585
15586+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15587+{
15588+ return xchg(&v->counter, new);
15589+}
15590+
15591 /**
15592 * __atomic_add_unless - add unless the number is already a given value
15593 * @v: pointer of type atomic_t
15594@@ -191,14 +337,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
15595 * Atomically adds @a to @v, so long as @v was not already @u.
15596 * Returns the old value of @v.
15597 */
15598-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15599+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
15600 {
15601- int c, old;
15602+ int c, old, new;
15603 c = atomic_read(v);
15604 for (;;) {
15605- if (unlikely(c == (u)))
15606+ if (unlikely(c == u))
15607 break;
15608- old = atomic_cmpxchg((v), c, c + (a));
15609+
15610+ asm volatile("addl %2,%0\n"
15611+
15612+#ifdef CONFIG_PAX_REFCOUNT
15613+ "jno 0f\n"
15614+ "subl %2,%0\n"
15615+ "int $4\n0:\n"
15616+ _ASM_EXTABLE(0b, 0b)
15617+#endif
15618+
15619+ : "=r" (new)
15620+ : "0" (c), "ir" (a));
15621+
15622+ old = atomic_cmpxchg(v, c, new);
15623 if (likely(old == c))
15624 break;
15625 c = old;
15626@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15627 }
15628
15629 /**
15630+ * atomic_inc_not_zero_hint - increment if not null
15631+ * @v: pointer of type atomic_t
15632+ * @hint: probable value of the atomic before the increment
15633+ *
15634+ * This version of atomic_inc_not_zero() gives a hint of the probable
15635+ * value of the atomic. This helps the processor avoid reading the
15636+ * memory before doing the atomic read/modify/write cycle, lowering
15637+ * the number of bus transactions on some arches.
15638+ *
15639+ * Returns: 0 if increment was not done, 1 otherwise.
15640+ */
15641+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15642+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15643+{
15644+ int val, c = hint, new;
15645+
15646+ /* sanity test, should be removed by compiler if hint is a constant */
15647+ if (!hint)
15648+ return __atomic_add_unless(v, 1, 0);
15649+
15650+ do {
15651+ asm volatile("incl %0\n"
15652+
15653+#ifdef CONFIG_PAX_REFCOUNT
15654+ "jno 0f\n"
15655+ "decl %0\n"
15656+ "int $4\n0:\n"
15657+ _ASM_EXTABLE(0b, 0b)
15658+#endif
15659+
15660+ : "=r" (new)
15661+ : "0" (c));
15662+
15663+ val = atomic_cmpxchg(v, c, new);
15664+ if (val == c)
15665+ return 1;
15666+ c = val;
15667+ } while (c);
15668+
15669+ return 0;
15670+}
15671+
15672+/**
15673 * atomic_inc_short - increment of a short integer
15674 * @v: pointer to type int
15675 *
15676@@ -235,14 +437,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
15677 #endif
15678
15679 /* These are x86-specific, used by some header files */
15680-#define atomic_clear_mask(mask, addr) \
15681- asm volatile(LOCK_PREFIX "andl %0,%1" \
15682- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15683+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15684+{
15685+ asm volatile(LOCK_PREFIX "andl %1,%0"
15686+ : "+m" (v->counter)
15687+ : "r" (~(mask))
15688+ : "memory");
15689+}
15690
15691-#define atomic_set_mask(mask, addr) \
15692- asm volatile(LOCK_PREFIX "orl %0,%1" \
15693- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15694- : "memory")
15695+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15696+{
15697+ asm volatile(LOCK_PREFIX "andl %1,%0"
15698+ : "+m" (v->counter)
15699+ : "r" (~(mask))
15700+ : "memory");
15701+}
15702+
15703+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15704+{
15705+ asm volatile(LOCK_PREFIX "orl %1,%0"
15706+ : "+m" (v->counter)
15707+ : "r" (mask)
15708+ : "memory");
15709+}
15710+
15711+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15712+{
15713+ asm volatile(LOCK_PREFIX "orl %1,%0"
15714+ : "+m" (v->counter)
15715+ : "r" (mask)
15716+ : "memory");
15717+}
15718
15719 #ifdef CONFIG_X86_32
15720 # include <asm/atomic64_32.h>
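
The jno / int $4 sequences added throughout this header are the PAX_REFCOUNT pattern: perform the locked operation, and if it set the overflow flag, undo it and trap so the overflow handler can act before a reference count wraps. A plain-C model of that control flow, with __builtin_add_overflow and abort() standing in for the asm and the trap:

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void refcount_add_demo(int i, int *counter)
	{
		int sum;

		if (__builtin_add_overflow(*counter, i, &sum)) {
			/* asm path: undo the locked add, then "int $4" so
			   the overflow handler can kill the offending task */
			abort();
		}
		*counter = sum;
	}

	int main(void)
	{
		int refs = INT_MAX - 1;

		refcount_add_demo(1, &refs);	/* ok: reaches INT_MAX */
		printf("refs = %d\n", refs);
		refcount_add_demo(1, &refs);	/* overflow: aborts like int $4 */
		return 0;
	}
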
15721diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15722index b154de7..bf18a5a 100644
15723--- a/arch/x86/include/asm/atomic64_32.h
15724+++ b/arch/x86/include/asm/atomic64_32.h
15725@@ -12,6 +12,14 @@ typedef struct {
15726 u64 __aligned(8) counter;
15727 } atomic64_t;
15728
15729+#ifdef CONFIG_PAX_REFCOUNT
15730+typedef struct {
15731+ u64 __aligned(8) counter;
15732+} atomic64_unchecked_t;
15733+#else
15734+typedef atomic64_t atomic64_unchecked_t;
15735+#endif
15736+
15737 #define ATOMIC64_INIT(val) { (val) }
15738
15739 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15740@@ -37,21 +45,31 @@ typedef struct {
15741 ATOMIC64_DECL_ONE(sym##_386)
15742
15743 ATOMIC64_DECL_ONE(add_386);
15744+ATOMIC64_DECL_ONE(add_unchecked_386);
15745 ATOMIC64_DECL_ONE(sub_386);
15746+ATOMIC64_DECL_ONE(sub_unchecked_386);
15747 ATOMIC64_DECL_ONE(inc_386);
15748+ATOMIC64_DECL_ONE(inc_unchecked_386);
15749 ATOMIC64_DECL_ONE(dec_386);
15750+ATOMIC64_DECL_ONE(dec_unchecked_386);
15751 #endif
15752
15753 #define alternative_atomic64(f, out, in...) \
15754 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15755
15756 ATOMIC64_DECL(read);
15757+ATOMIC64_DECL(read_unchecked);
15758 ATOMIC64_DECL(set);
15759+ATOMIC64_DECL(set_unchecked);
15760 ATOMIC64_DECL(xchg);
15761 ATOMIC64_DECL(add_return);
15762+ATOMIC64_DECL(add_return_unchecked);
15763 ATOMIC64_DECL(sub_return);
15764+ATOMIC64_DECL(sub_return_unchecked);
15765 ATOMIC64_DECL(inc_return);
15766+ATOMIC64_DECL(inc_return_unchecked);
15767 ATOMIC64_DECL(dec_return);
15768+ATOMIC64_DECL(dec_return_unchecked);
15769 ATOMIC64_DECL(dec_if_positive);
15770 ATOMIC64_DECL(inc_not_zero);
15771 ATOMIC64_DECL(add_unless);
15772@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15773 }
15774
15775 /**
15776+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15777+ * @v: pointer to type atomic64_unchecked_t
15778+ * @o: expected value
15779+ * @n: new value
15780+ *
15781+ * Atomically sets @v to @n if it was equal to @o and returns
15782+ * the old value.
15783+ */
15784+
15785+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15786+{
15787+ return cmpxchg64(&v->counter, o, n);
15788+}
15789+
15790+/**
15791 * atomic64_xchg - xchg atomic64 variable
15792 * @v: pointer to type atomic64_t
15793 * @n: value to assign
15794@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15795 }
15796
15797 /**
15798+ * atomic64_set_unchecked - set atomic64 variable
15799+ * @v: pointer to type atomic64_unchecked_t
15800+ * @i: value to assign
15801+ *
15802+ * Atomically sets the value of @v to @i.
15803+ */
15804+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15805+{
15806+ unsigned high = (unsigned)(i >> 32);
15807+ unsigned low = (unsigned)i;
15808+ alternative_atomic64(set, /* no output */,
15809+ "S" (v), "b" (low), "c" (high)
15810+ : "eax", "edx", "memory");
15811+}
15812+
15813+/**
15814 * atomic64_read - read atomic64 variable
15815 * @v: pointer to type atomic64_t
15816 *
15817@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15818 }
15819
15820 /**
15821+ * atomic64_read_unchecked - read atomic64 variable
15822+ * @v: pointer to type atomic64_unchecked_t
15823+ *
15824+ * Atomically reads the value of @v and returns it.
15825+ */
15826+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15827+{
15828+ long long r;
15829+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15830+ return r;
15831+}
15832+
15833+/**
15834 * atomic64_add_return - add and return
15835 * @i: integer value to add
15836 * @v: pointer to type atomic64_t
15837@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15838 return i;
15839 }
15840
15841+/**
15842+ * atomic64_add_return_unchecked - add and return
15843+ * @i: integer value to add
15844+ * @v: pointer to type atomic64_unchecked_t
15845+ *
15846+ * Atomically adds @i to @v and returns @i + *@v
15847+ */
15848+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15849+{
15850+ alternative_atomic64(add_return_unchecked,
15851+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15852+ ASM_NO_INPUT_CLOBBER("memory"));
15853+ return i;
15854+}
15855+
15856 /*
15857 * Other variants with different arithmetic operators:
15858 */
15859@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15860 return a;
15861 }
15862
15863+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15864+{
15865+ long long a;
15866+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15867+ "S" (v) : "memory", "ecx");
15868+ return a;
15869+}
15870+
15871 static inline long long atomic64_dec_return(atomic64_t *v)
15872 {
15873 long long a;
15874@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15875 }
15876
15877 /**
15878+ * atomic64_add_unchecked - add integer to atomic64 variable
15879+ * @i: integer value to add
15880+ * @v: pointer to type atomic64_unchecked_t
15881+ *
15882+ * Atomically adds @i to @v.
15883+ */
15884+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15885+{
15886+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15887+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15888+ ASM_NO_INPUT_CLOBBER("memory"));
15889+ return i;
15890+}
15891+
15892+/**
15893 * atomic64_sub - subtract the atomic64 variable
15894 * @i: integer value to subtract
15895 * @v: pointer to type atomic64_t
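
The duplicated *_unchecked declarations exist so that counters which are expected to wrap (statistics and the like) can opt out of the PAX_REFCOUNT guard while real reference counts stay protected. A reduced sketch of such an opt-out counter (types and names are stand-ins):

	#include <stdio.h>

	typedef struct { long long counter; } atomic64_unchecked_demo_t;

	static void atomic64_inc_unchecked_demo(atomic64_unchecked_demo_t *v)
	{
		/* plain locked increment, no jno/int $4 guard: wrap is tolerated */
		__atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
	}

	int main(void)
	{
		atomic64_unchecked_demo_t rx_bytes = { 0x7fffffffffffffffLL };

		atomic64_inc_unchecked_demo(&rx_bytes);	/* wraps negative by design */
		printf("%lld\n", rx_bytes.counter);
		return 0;
	}
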
15896diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15897index 46e9052..ae45136 100644
15898--- a/arch/x86/include/asm/atomic64_64.h
15899+++ b/arch/x86/include/asm/atomic64_64.h
15900@@ -18,7 +18,19 @@
15901 */
15902 static inline long atomic64_read(const atomic64_t *v)
15903 {
15904- return (*(volatile long *)&(v)->counter);
15905+ return (*(volatile const long *)&(v)->counter);
15906+}
15907+
15908+/**
15909+ * atomic64_read_unchecked - read atomic64 variable
15910+ * @v: pointer of type atomic64_unchecked_t
15911+ *
15912+ * Atomically reads the value of @v.
15913+ * Doesn't imply a read memory barrier.
15914+ */
15915+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15916+{
15917+ return (*(volatile const long *)&(v)->counter);
15918 }
15919
15920 /**
15921@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15922 }
15923
15924 /**
15925+ * atomic64_set_unchecked - set atomic64 variable
15926+ * @v: pointer to type atomic64_unchecked_t
15927+ * @i: required value
15928+ *
15929+ * Atomically sets the value of @v to @i.
15930+ */
15931+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15932+{
15933+ v->counter = i;
15934+}
15935+
15936+/**
15937 * atomic64_add - add integer to atomic64 variable
15938 * @i: integer value to add
15939 * @v: pointer to type atomic64_t
15940@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15941 */
15942 static inline void atomic64_add(long i, atomic64_t *v)
15943 {
15944+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15945+
15946+#ifdef CONFIG_PAX_REFCOUNT
15947+ "jno 0f\n"
15948+ LOCK_PREFIX "subq %1,%0\n"
15949+ "int $4\n0:\n"
15950+ _ASM_EXTABLE(0b, 0b)
15951+#endif
15952+
15953+ : "=m" (v->counter)
15954+ : "er" (i), "m" (v->counter));
15955+}
15956+
15957+/**
15958+ * atomic64_add_unchecked - add integer to atomic64 variable
15959+ * @i: integer value to add
15960+ * @v: pointer to type atomic64_unchecked_t
15961+ *
15962+ * Atomically adds @i to @v.
15963+ */
15964+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15965+{
15966 asm volatile(LOCK_PREFIX "addq %1,%0"
15967 : "=m" (v->counter)
15968 : "er" (i), "m" (v->counter));
15969@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15970 */
15971 static inline void atomic64_sub(long i, atomic64_t *v)
15972 {
15973- asm volatile(LOCK_PREFIX "subq %1,%0"
15974+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15975+
15976+#ifdef CONFIG_PAX_REFCOUNT
15977+ "jno 0f\n"
15978+ LOCK_PREFIX "addq %1,%0\n"
15979+ "int $4\n0:\n"
15980+ _ASM_EXTABLE(0b, 0b)
15981+#endif
15982+
15983+ : "=m" (v->counter)
15984+ : "er" (i), "m" (v->counter));
15985+}
15986+
15987+/**
15988+ * atomic64_sub_unchecked - subtract the atomic64 variable
15989+ * @i: integer value to subtract
15990+ * @v: pointer to type atomic64_unchecked_t
15991+ *
15992+ * Atomically subtracts @i from @v.
15993+ */
15994+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15995+{
15996+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15997 : "=m" (v->counter)
15998 : "er" (i), "m" (v->counter));
15999 }
16000@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
16001 */
16002 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16003 {
16004- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
16005+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
16006 }
16007
16008 /**
16009@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16010 */
16011 static inline void atomic64_inc(atomic64_t *v)
16012 {
16013+ asm volatile(LOCK_PREFIX "incq %0\n"
16014+
16015+#ifdef CONFIG_PAX_REFCOUNT
16016+ "jno 0f\n"
16017+ LOCK_PREFIX "decq %0\n"
16018+ "int $4\n0:\n"
16019+ _ASM_EXTABLE(0b, 0b)
16020+#endif
16021+
16022+ : "=m" (v->counter)
16023+ : "m" (v->counter));
16024+}
16025+
16026+/**
16027+ * atomic64_inc_unchecked - increment atomic64 variable
16028+ * @v: pointer to type atomic64_unchecked_t
16029+ *
16030+ * Atomically increments @v by 1.
16031+ */
16032+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
16033+{
16034 asm volatile(LOCK_PREFIX "incq %0"
16035 : "=m" (v->counter)
16036 : "m" (v->counter));
16037@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
16038 */
16039 static inline void atomic64_dec(atomic64_t *v)
16040 {
16041- asm volatile(LOCK_PREFIX "decq %0"
16042+ asm volatile(LOCK_PREFIX "decq %0\n"
16043+
16044+#ifdef CONFIG_PAX_REFCOUNT
16045+ "jno 0f\n"
16046+ LOCK_PREFIX "incq %0\n"
16047+ "int $4\n0:\n"
16048+ _ASM_EXTABLE(0b, 0b)
16049+#endif
16050+
16051+ : "=m" (v->counter)
16052+ : "m" (v->counter));
16053+}
16054+
16055+/**
16056+ * atomic64_dec_unchecked - decrement atomic64 variable
16057+ * @v: pointer to type atomic64_unchecked_t
16058+ *
16059+ * Atomically decrements @v by 1.
16060+ */
16061+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
16062+{
16063+ asm volatile(LOCK_PREFIX "decq %0\n"
16064 : "=m" (v->counter)
16065 : "m" (v->counter));
16066 }
16067@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
16068 */
16069 static inline int atomic64_dec_and_test(atomic64_t *v)
16070 {
16071- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
16072+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
16073 }
16074
16075 /**
16076@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16077 */
16078 static inline int atomic64_inc_and_test(atomic64_t *v)
16079 {
16080- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16081+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16082 }
16083
16084 /**
16085@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16086 */
16087 static inline int atomic64_add_negative(long i, atomic64_t *v)
16088 {
16089- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16090+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16091 }
16092
16093 /**
16094@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16095 */
16096 static inline long atomic64_add_return(long i, atomic64_t *v)
16097 {
16098+ return i + xadd_check_overflow(&v->counter, i);
16099+}
16100+
16101+/**
16102+ * atomic64_add_return_unchecked - add and return
16103+ * @i: integer value to add
16104+ * @v: pointer to type atomic64_unchecked_t
16105+ *
16106+ * Atomically adds @i to @v and returns @i + @v
16107+ */
16108+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16109+{
16110 return i + xadd(&v->counter, i);
16111 }
16112
16113@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16114 }
16115
16116 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16117+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16118+{
16119+ return atomic64_add_return_unchecked(1, v);
16120+}
16121 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16122
16123 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16124@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16125 return cmpxchg(&v->counter, old, new);
16126 }
16127
16128+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16129+{
16130+ return cmpxchg(&v->counter, old, new);
16131+}
16132+
16133 static inline long atomic64_xchg(atomic64_t *v, long new)
16134 {
16135 return xchg(&v->counter, new);
16136@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16137 */
16138 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16139 {
16140- long c, old;
16141+ long c, old, new;
16142 c = atomic64_read(v);
16143 for (;;) {
16144- if (unlikely(c == (u)))
16145+ if (unlikely(c == u))
16146 break;
16147- old = atomic64_cmpxchg((v), c, c + (a));
16148+
16149+ asm volatile("add %2,%0\n"
16150+
16151+#ifdef CONFIG_PAX_REFCOUNT
16152+ "jno 0f\n"
16153+ "sub %2,%0\n"
16154+ "int $4\n0:\n"
16155+ _ASM_EXTABLE(0b, 0b)
16156+#endif
16157+
16158+ : "=r" (new)
16159+ : "0" (c), "ir" (a));
16160+
16161+ old = atomic64_cmpxchg(v, c, new);
16162 if (likely(old == c))
16163 break;
16164 c = old;
16165 }
16166- return c != (u);
16167+ return c != u;
16168 }
16169
16170 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
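
atomic64_add_unless() above keeps the usual cmpxchg retry loop and only adds the overflow check to the addition. A userspace model of the loop itself, using the GCC __atomic builtins in place of the kernel's cmpxchg (the overflow guard is elided here):

	#include <stdio.h>

	static long atomic64_add_unless_demo(long *v, long a, long u)
	{
		long c = __atomic_load_n(v, __ATOMIC_RELAXED);

		for (;;) {
			if (c == u)
				break;
			/* on failure the builtin refreshes c with the live
			   value, mirroring "c = old" in the kernel loop */
			if (__atomic_compare_exchange_n(v, &c, c + a, 0,
							__ATOMIC_SEQ_CST,
							__ATOMIC_SEQ_CST))
				break;
		}
		return c != u;
	}

	int main(void)
	{
		long v = 5;
		long done;

		done = atomic64_add_unless_demo(&v, 1, 5);
		printf("%ld %ld\n", done, v);	/* 0 5: was already u */
		done = atomic64_add_unless_demo(&v, 1, 0);
		printf("%ld %ld\n", done, v);	/* 1 6: added */
		return 0;
	}
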
16171diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16172index 5c7198c..44180b5 100644
16173--- a/arch/x86/include/asm/barrier.h
16174+++ b/arch/x86/include/asm/barrier.h
16175@@ -107,7 +107,7 @@
16176 do { \
16177 compiletime_assert_atomic_type(*p); \
16178 smp_mb(); \
16179- ACCESS_ONCE(*p) = (v); \
16180+ ACCESS_ONCE_RW(*p) = (v); \
16181 } while (0)
16182
16183 #define smp_load_acquire(p) \
16184@@ -124,7 +124,7 @@ do { \
16185 do { \
16186 compiletime_assert_atomic_type(*p); \
16187 barrier(); \
16188- ACCESS_ONCE(*p) = (v); \
16189+ ACCESS_ONCE_RW(*p) = (v); \
16190 } while (0)
16191
16192 #define smp_load_acquire(p) \
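
Both smp_store_release() variants switch to ACCESS_ONCE_RW because, under this patch, plain ACCESS_ONCE gains a const qualifier so accidental stores through it fail to compile. A reduced model of the two macros (the exact kernel definitions live elsewhere in the patch; these are assumed equivalents):

	#include <stdio.h>

	#define ACCESS_ONCE_DEMO(x)	(*(const volatile typeof(x) *)&(x))
	#define ACCESS_ONCE_RW_DEMO(x)	(*(volatile typeof(x) *)&(x))

	int main(void)
	{
		int flag = 0;

		ACCESS_ONCE_RW_DEMO(flag) = 1;		/* stores need the RW alias */
		printf("%d\n", ACCESS_ONCE_DEMO(flag));	/* loads keep the const one */
		/* ACCESS_ONCE_DEMO(flag) = 2; -- rejected at compile time */
		return 0;
	}
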
16193diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16194index afcd35d..141b32d 100644
16195--- a/arch/x86/include/asm/bitops.h
16196+++ b/arch/x86/include/asm/bitops.h
16197@@ -50,7 +50,7 @@
16198 * a mask operation on a byte.
16199 */
16200 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16201-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16202+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16203 #define CONST_MASK(nr) (1 << ((nr) & 7))
16204
16205 /**
16206@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16207 */
16208 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16209 {
16210- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16211+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16212 }
16213
16214 /**
16215@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16216 */
16217 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16218 {
16219- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16220+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16221 }
16222
16223 /**
16224@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16225 */
16226 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16227 {
16228- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16229+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16230 }
16231
16232 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16233@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16234 *
16235 * Undefined if no bit exists, so code should check against 0 first.
16236 */
16237-static inline unsigned long __ffs(unsigned long word)
16238+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16239 {
16240 asm("rep; bsf %1,%0"
16241 : "=r" (word)
16242@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16243 *
16244 * Undefined if no zero exists, so code should check against ~0UL first.
16245 */
16246-static inline unsigned long ffz(unsigned long word)
16247+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16248 {
16249 asm("rep; bsf %1,%0"
16250 : "=r" (word)
16251@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16252 *
16253 * Undefined if no set bit exists, so code should check against 0 first.
16254 */
16255-static inline unsigned long __fls(unsigned long word)
16256+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16257 {
16258 asm("bsr %1,%0"
16259 : "=r" (word)
16260@@ -434,7 +434,7 @@ static inline int ffs(int x)
16261 * set bit if value is nonzero. The last (most significant) bit is
16262 * at position 32.
16263 */
16264-static inline int fls(int x)
16265+static inline int __intentional_overflow(-1) fls(int x)
16266 {
16267 int r;
16268
16269@@ -476,7 +476,7 @@ static inline int fls(int x)
16270 * at position 64.
16271 */
16272 #ifdef CONFIG_X86_64
16273-static __always_inline int fls64(__u64 x)
16274+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16275 {
16276 int bitpos = -1;
16277 /*
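
__intentional_overflow(-1), sprinkled over __ffs/ffz/__fls/fls/fls64 above, is an annotation consumed by the size_overflow gcc plugin to exempt deliberately wrapping arithmetic from instrumentation; without the plugin it must compile away. A sketch with an assumed no-op fallback (x86-only asm, mirroring ffz):

	#include <stdio.h>

	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...) \
		__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)	/* plugin absent: expands to nothing */
	#endif

	static inline unsigned long __intentional_overflow(-1) ffz_demo(unsigned long word)
	{
		/* bsf of the complement finds the lowest zero bit */
		asm("rep; bsf %1,%0" : "=r" (word) : "r" (~word));
		return word;
	}

	int main(void)
	{
		printf("%lu\n", ffz_demo(0x0fUL));	/* first zero bit: 4 */
		return 0;
	}
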
16278diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16279index 4fa687a..60f2d39 100644
16280--- a/arch/x86/include/asm/boot.h
16281+++ b/arch/x86/include/asm/boot.h
16282@@ -6,10 +6,15 @@
16283 #include <uapi/asm/boot.h>
16284
16285 /* Physical address where kernel should be loaded. */
16286-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16287+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16288 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16289 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16290
16291+#ifndef __ASSEMBLY__
16292+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16293+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16294+#endif
16295+
16296 /* Minimum kernel alignment, as a power of two */
16297 #ifdef CONFIG_X86_64
16298 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
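
The boot.h hunk turns LOAD_PHYSICAL_ADDR from preprocessor arithmetic into the address of a linker-provided symbol, so C code binds the value at link time instead of hard-coding the Kconfig expression. A sketch where the linker-script symbol is faked with a local definition:

	#include <stdio.h>

	extern unsigned char __LOAD_PHYSICAL_ADDR_demo[];
	unsigned char __LOAD_PHYSICAL_ADDR_demo[1];	/* linker script provides this for real */

	#define LOAD_PHYSICAL_ADDR_DEMO ((unsigned long)__LOAD_PHYSICAL_ADDR_demo)

	int main(void)
	{
		printf("%#lx\n", LOAD_PHYSICAL_ADDR_DEMO);	/* bound at link time */
		return 0;
	}
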
16299diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16300index 48f99f1..d78ebf9 100644
16301--- a/arch/x86/include/asm/cache.h
16302+++ b/arch/x86/include/asm/cache.h
16303@@ -5,12 +5,13 @@
16304
16305 /* L1 cache line size */
16306 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16307-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16308+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16309
16310 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16311+#define __read_only __attribute__((__section__(".data..read_only")))
16312
16313 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16314-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16315+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16316
16317 #ifdef CONFIG_X86_VSMP
16318 #ifdef CONFIG_SMP
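
__read_only places an object in .data..read_only, which the kernel write-protects after init. A loose userspace approximation (the section name below is illustrative; const/.rodata is the closest native analogue):

	#include <stdio.h>

	#define __read_only_demo __attribute__((section(".data.read_only_demo")))

	static int boot_tunable __read_only_demo = 42;

	int main(void)
	{
		printf("%d\n", boot_tunable);	/* reads are unaffected */
		return 0;
	}
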
16319diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16320index 9863ee3..4a1f8e1 100644
16321--- a/arch/x86/include/asm/cacheflush.h
16322+++ b/arch/x86/include/asm/cacheflush.h
16323@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16324 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16325
16326 if (pg_flags == _PGMT_DEFAULT)
16327- return -1;
16328+ return ~0UL;
16329 else if (pg_flags == _PGMT_WC)
16330 return _PAGE_CACHE_WC;
16331 else if (pg_flags == _PGMT_UC_MINUS)
16332diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16333index cb4c73b..c473c29 100644
16334--- a/arch/x86/include/asm/calling.h
16335+++ b/arch/x86/include/asm/calling.h
16336@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16337 #define RSP 152
16338 #define SS 160
16339
16340-#define ARGOFFSET R11
16341-#define SWFRAME ORIG_RAX
16342+#define ARGOFFSET R15
16343
16344 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16345- subq $9*8+\addskip, %rsp
16346- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16347- movq_cfi rdi, 8*8
16348- movq_cfi rsi, 7*8
16349- movq_cfi rdx, 6*8
16350+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16351+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16352+ movq_cfi rdi, RDI
16353+ movq_cfi rsi, RSI
16354+ movq_cfi rdx, RDX
16355
16356 .if \save_rcx
16357- movq_cfi rcx, 5*8
16358+ movq_cfi rcx, RCX
16359 .endif
16360
16361- movq_cfi rax, 4*8
16362+ movq_cfi rax, RAX
16363
16364 .if \save_r891011
16365- movq_cfi r8, 3*8
16366- movq_cfi r9, 2*8
16367- movq_cfi r10, 1*8
16368- movq_cfi r11, 0*8
16369+ movq_cfi r8, R8
16370+ movq_cfi r9, R9
16371+ movq_cfi r10, R10
16372+ movq_cfi r11, R11
16373 .endif
16374
16375+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16376+ movq_cfi r12, R12
16377+#endif
16378+
16379 .endm
16380
16381-#define ARG_SKIP (9*8)
16382+#define ARG_SKIP ORIG_RAX
16383
16384 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16385 rstor_r8910=1, rstor_rdx=1
16386+
16387+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16388+ movq_cfi_restore R12, r12
16389+#endif
16390+
16391 .if \rstor_r11
16392- movq_cfi_restore 0*8, r11
16393+ movq_cfi_restore R11, r11
16394 .endif
16395
16396 .if \rstor_r8910
16397- movq_cfi_restore 1*8, r10
16398- movq_cfi_restore 2*8, r9
16399- movq_cfi_restore 3*8, r8
16400+ movq_cfi_restore R10, r10
16401+ movq_cfi_restore R9, r9
16402+ movq_cfi_restore R8, r8
16403 .endif
16404
16405 .if \rstor_rax
16406- movq_cfi_restore 4*8, rax
16407+ movq_cfi_restore RAX, rax
16408 .endif
16409
16410 .if \rstor_rcx
16411- movq_cfi_restore 5*8, rcx
16412+ movq_cfi_restore RCX, rcx
16413 .endif
16414
16415 .if \rstor_rdx
16416- movq_cfi_restore 6*8, rdx
16417+ movq_cfi_restore RDX, rdx
16418 .endif
16419
16420- movq_cfi_restore 7*8, rsi
16421- movq_cfi_restore 8*8, rdi
16422+ movq_cfi_restore RSI, rsi
16423+ movq_cfi_restore RDI, rdi
16424
16425- .if ARG_SKIP+\addskip > 0
16426- addq $ARG_SKIP+\addskip, %rsp
16427- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16428+ .if ORIG_RAX+\addskip > 0
16429+ addq $ORIG_RAX+\addskip, %rsp
16430+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16431 .endif
16432 .endm
16433
16434- .macro LOAD_ARGS offset, skiprax=0
16435- movq \offset(%rsp), %r11
16436- movq \offset+8(%rsp), %r10
16437- movq \offset+16(%rsp), %r9
16438- movq \offset+24(%rsp), %r8
16439- movq \offset+40(%rsp), %rcx
16440- movq \offset+48(%rsp), %rdx
16441- movq \offset+56(%rsp), %rsi
16442- movq \offset+64(%rsp), %rdi
16443+ .macro LOAD_ARGS skiprax=0
16444+ movq R11(%rsp), %r11
16445+ movq R10(%rsp), %r10
16446+ movq R9(%rsp), %r9
16447+ movq R8(%rsp), %r8
16448+ movq RCX(%rsp), %rcx
16449+ movq RDX(%rsp), %rdx
16450+ movq RSI(%rsp), %rsi
16451+ movq RDI(%rsp), %rdi
16452 .if \skiprax
16453 .else
16454- movq \offset+72(%rsp), %rax
16455+ movq RAX(%rsp), %rax
16456 .endif
16457 .endm
16458
16459-#define REST_SKIP (6*8)
16460-
16461 .macro SAVE_REST
16462- subq $REST_SKIP, %rsp
16463- CFI_ADJUST_CFA_OFFSET REST_SKIP
16464- movq_cfi rbx, 5*8
16465- movq_cfi rbp, 4*8
16466- movq_cfi r12, 3*8
16467- movq_cfi r13, 2*8
16468- movq_cfi r14, 1*8
16469- movq_cfi r15, 0*8
16470+ movq_cfi rbx, RBX
16471+ movq_cfi rbp, RBP
16472+
16473+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16474+ movq_cfi r12, R12
16475+#endif
16476+
16477+ movq_cfi r13, R13
16478+ movq_cfi r14, R14
16479+ movq_cfi r15, R15
16480 .endm
16481
16482 .macro RESTORE_REST
16483- movq_cfi_restore 0*8, r15
16484- movq_cfi_restore 1*8, r14
16485- movq_cfi_restore 2*8, r13
16486- movq_cfi_restore 3*8, r12
16487- movq_cfi_restore 4*8, rbp
16488- movq_cfi_restore 5*8, rbx
16489- addq $REST_SKIP, %rsp
16490- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16491+ movq_cfi_restore R15, r15
16492+ movq_cfi_restore R14, r14
16493+ movq_cfi_restore R13, r13
16494+
16495+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16496+ movq_cfi_restore R12, r12
16497+#endif
16498+
16499+ movq_cfi_restore RBP, rbp
16500+ movq_cfi_restore RBX, rbx
16501 .endm
16502
16503 .macro SAVE_ALL
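
The calling.h rewrite replaces counted n*8 stack slots with the named pt_regs offsets (RDI, RAX, ORIG_RAX, ...), which keeps every macro correct when a slot such as r12, reserved for the KERNEXEC mask, joins the argument frame. A sketch of where those constants come from, assuming the standard x86-64 pt_regs layout:

	#include <stddef.h>
	#include <stdio.h>

	struct pt_regs_demo {
		unsigned long r15, r14, r13, r12, rbp, rbx;
		unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
		unsigned long orig_rax, rip, cs, eflags, rsp, ss;
	};

	int main(void)
	{
		/* matches the RDI/ORIG_RAX/SS constants defined above */
		printf("RDI=%zu ORIG_RAX=%zu SS=%zu\n",
		       offsetof(struct pt_regs_demo, rdi),
		       offsetof(struct pt_regs_demo, orig_rax),
		       offsetof(struct pt_regs_demo, ss));
		return 0;
	}
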
16504diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16505index f50de69..2b0a458 100644
16506--- a/arch/x86/include/asm/checksum_32.h
16507+++ b/arch/x86/include/asm/checksum_32.h
16508@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16509 int len, __wsum sum,
16510 int *src_err_ptr, int *dst_err_ptr);
16511
16512+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16513+ int len, __wsum sum,
16514+ int *src_err_ptr, int *dst_err_ptr);
16515+
16516+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16517+ int len, __wsum sum,
16518+ int *src_err_ptr, int *dst_err_ptr);
16519+
16520 /*
16521 * Note: when you get a NULL pointer exception here this means someone
16522 * passed in an incorrect kernel address to one of these functions.
16523@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16524
16525 might_sleep();
16526 stac();
16527- ret = csum_partial_copy_generic((__force void *)src, dst,
16528+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16529 len, sum, err_ptr, NULL);
16530 clac();
16531
16532@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16533 might_sleep();
16534 if (access_ok(VERIFY_WRITE, dst, len)) {
16535 stac();
16536- ret = csum_partial_copy_generic(src, (__force void *)dst,
16537+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16538 len, sum, NULL, err_ptr);
16539 clac();
16540 return ret;
16541diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16542index d47786a..2d8883e 100644
16543--- a/arch/x86/include/asm/cmpxchg.h
16544+++ b/arch/x86/include/asm/cmpxchg.h
16545@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
16546 __compiletime_error("Bad argument size for cmpxchg");
16547 extern void __xadd_wrong_size(void)
16548 __compiletime_error("Bad argument size for xadd");
16549+extern void __xadd_check_overflow_wrong_size(void)
16550+ __compiletime_error("Bad argument size for xadd_check_overflow");
16551 extern void __add_wrong_size(void)
16552 __compiletime_error("Bad argument size for add");
16553+extern void __add_check_overflow_wrong_size(void)
16554+ __compiletime_error("Bad argument size for add_check_overflow");
16555
16556 /*
16557 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16558@@ -67,6 +71,38 @@ extern void __add_wrong_size(void)
16559 __ret; \
16560 })
16561
16562+#ifdef CONFIG_PAX_REFCOUNT
16563+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16564+ ({ \
16565+ __typeof__ (*(ptr)) __ret = (arg); \
16566+ switch (sizeof(*(ptr))) { \
16567+ case __X86_CASE_L: \
16568+ asm volatile (lock #op "l %0, %1\n" \
16569+ "jno 0f\n" \
16570+ "mov %0,%1\n" \
16571+ "int $4\n0:\n" \
16572+ _ASM_EXTABLE(0b, 0b) \
16573+ : "+r" (__ret), "+m" (*(ptr)) \
16574+ : : "memory", "cc"); \
16575+ break; \
16576+ case __X86_CASE_Q: \
16577+ asm volatile (lock #op "q %q0, %1\n" \
16578+ "jno 0f\n" \
16579+ "mov %0,%1\n" \
16580+ "int $4\n0:\n" \
16581+ _ASM_EXTABLE(0b, 0b) \
16582+ : "+r" (__ret), "+m" (*(ptr)) \
16583+ : : "memory", "cc"); \
16584+ break; \
16585+ default: \
16586+ __ ## op ## _check_overflow_wrong_size(); \
16587+ } \
16588+ __ret; \
16589+ })
16590+#else
16591+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16592+#endif
16593+
16594 /*
16595 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16596 * Since this is generally used to protect other memory information, we
16597@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16598 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16599 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16600
16601+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16602+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16603+
16604 #define __add(ptr, inc, lock) \
16605 ({ \
16606 __typeof__ (*(ptr)) __ret = (inc); \
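
xadd_check_overflow() wraps xadd with the same jno guard: on signed overflow the old value is written back and int $4 is raised. A plain-C model with GCC builtins standing in for the asm:

	#include <stdio.h>
	#include <stdlib.h>

	static int xadd_check_overflow_demo(int *ptr, int inc)
	{
		int old = __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
		int sum;

		if (__builtin_add_overflow(old, inc, &sum)) {
			/* asm path: "mov %0,%1" restores the old value,
			   then "int $4" raises the overflow trap */
			__atomic_store_n(ptr, old, __ATOMIC_SEQ_CST);
			abort();
		}
		return old;
	}

	int main(void)
	{
		int v = 40;

		/* atomic_add_return() is built as old + inc on top of this */
		printf("%d\n", xadd_check_overflow_demo(&v, 2) + 2);	/* 42 */
		return 0;
	}
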
16607diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16608index 59c6c40..5e0b22c 100644
16609--- a/arch/x86/include/asm/compat.h
16610+++ b/arch/x86/include/asm/compat.h
16611@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16612 typedef u32 compat_uint_t;
16613 typedef u32 compat_ulong_t;
16614 typedef u64 __attribute__((aligned(4))) compat_u64;
16615-typedef u32 compat_uptr_t;
16616+typedef u32 __user compat_uptr_t;
16617
16618 struct compat_timespec {
16619 compat_time_t tv_sec;
16620diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16621index e265ff9..72c253b 100644
16622--- a/arch/x86/include/asm/cpufeature.h
16623+++ b/arch/x86/include/asm/cpufeature.h
16624@@ -203,7 +203,7 @@
16625 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
16626 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
16627 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
16628-
16629+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16630
16631 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16632 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16633@@ -211,7 +211,7 @@
16634 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
16635 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
16636 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
16637-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
16638+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
16639 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
16640 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16641 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
16642@@ -359,6 +359,7 @@ extern const char * const x86_power_flags[32];
16643 #undef cpu_has_centaur_mcr
16644 #define cpu_has_centaur_mcr 0
16645
16646+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16647 #endif /* CONFIG_X86_64 */
16648
16649 #if __GNUC__ >= 4
16650@@ -411,7 +412,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16651
16652 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16653 t_warn:
16654- warn_pre_alternatives();
16655+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16656+ warn_pre_alternatives();
16657 return false;
16658 #endif
16659
16660@@ -431,7 +433,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16661 ".section .discard,\"aw\",@progbits\n"
16662 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16663 ".previous\n"
16664- ".section .altinstr_replacement,\"ax\"\n"
16665+ ".section .altinstr_replacement,\"a\"\n"
16666 "3: movb $1,%0\n"
16667 "4:\n"
16668 ".previous\n"
16669@@ -468,7 +470,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16670 " .byte 2b - 1b\n" /* src len */
16671 " .byte 4f - 3f\n" /* repl len */
16672 ".previous\n"
16673- ".section .altinstr_replacement,\"ax\"\n"
16674+ ".section .altinstr_replacement,\"a\"\n"
16675 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16676 "4:\n"
16677 ".previous\n"
16678@@ -501,7 +503,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16679 ".section .discard,\"aw\",@progbits\n"
16680 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16681 ".previous\n"
16682- ".section .altinstr_replacement,\"ax\"\n"
16683+ ".section .altinstr_replacement,\"a\"\n"
16684 "3: movb $0,%0\n"
16685 "4:\n"
16686 ".previous\n"
16687@@ -515,7 +517,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16688 ".section .discard,\"aw\",@progbits\n"
16689 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16690 ".previous\n"
16691- ".section .altinstr_replacement,\"ax\"\n"
16692+ ".section .altinstr_replacement,\"a\"\n"
16693 "5: movb $1,%0\n"
16694 "6:\n"
16695 ".previous\n"
16696diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16697index 50d033a..37deb26 100644
16698--- a/arch/x86/include/asm/desc.h
16699+++ b/arch/x86/include/asm/desc.h
16700@@ -4,6 +4,7 @@
16701 #include <asm/desc_defs.h>
16702 #include <asm/ldt.h>
16703 #include <asm/mmu.h>
16704+#include <asm/pgtable.h>
16705
16706 #include <linux/smp.h>
16707 #include <linux/percpu.h>
16708@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16709
16710 desc->type = (info->read_exec_only ^ 1) << 1;
16711 desc->type |= info->contents << 2;
16712+ desc->type |= info->seg_not_present ^ 1;
16713
16714 desc->s = 1;
16715 desc->dpl = 0x3;
16716@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16717 }
16718
16719 extern struct desc_ptr idt_descr;
16720-extern gate_desc idt_table[];
16721-extern struct desc_ptr debug_idt_descr;
16722-extern gate_desc debug_idt_table[];
16723-
16724-struct gdt_page {
16725- struct desc_struct gdt[GDT_ENTRIES];
16726-} __attribute__((aligned(PAGE_SIZE)));
16727-
16728-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16729+extern gate_desc idt_table[IDT_ENTRIES];
16730+extern const struct desc_ptr debug_idt_descr;
16731+extern gate_desc debug_idt_table[IDT_ENTRIES];
16732
16733+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16734 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16735 {
16736- return per_cpu(gdt_page, cpu).gdt;
16737+ return cpu_gdt_table[cpu];
16738 }
16739
16740 #ifdef CONFIG_X86_64
16741@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16742 unsigned long base, unsigned dpl, unsigned flags,
16743 unsigned short seg)
16744 {
16745- gate->a = (seg << 16) | (base & 0xffff);
16746- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16747+ gate->gate.offset_low = base;
16748+ gate->gate.seg = seg;
16749+ gate->gate.reserved = 0;
16750+ gate->gate.type = type;
16751+ gate->gate.s = 0;
16752+ gate->gate.dpl = dpl;
16753+ gate->gate.p = 1;
16754+ gate->gate.offset_high = base >> 16;
16755 }
16756
16757 #endif
16758@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16759
16760 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16761 {
16762+ pax_open_kernel();
16763 memcpy(&idt[entry], gate, sizeof(*gate));
16764+ pax_close_kernel();
16765 }
16766
16767 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16768 {
16769+ pax_open_kernel();
16770 memcpy(&ldt[entry], desc, 8);
16771+ pax_close_kernel();
16772 }
16773
16774 static inline void
16775@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16776 default: size = sizeof(*gdt); break;
16777 }
16778
16779+ pax_open_kernel();
16780 memcpy(&gdt[entry], desc, size);
16781+ pax_close_kernel();
16782 }
16783
16784 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16785@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16786
16787 static inline void native_load_tr_desc(void)
16788 {
16789+ pax_open_kernel();
16790 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16791+ pax_close_kernel();
16792 }
16793
16794 static inline void native_load_gdt(const struct desc_ptr *dtr)
16795@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16796 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16797 unsigned int i;
16798
16799+ pax_open_kernel();
16800 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16801 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16802+ pax_close_kernel();
16803 }
16804
16805 #define _LDT_empty(info) \
16806@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
16807 preempt_enable();
16808 }
16809
16810-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16811+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16812 {
16813 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16814 }
16815@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16816 }
16817
16818 #ifdef CONFIG_X86_64
16819-static inline void set_nmi_gate(int gate, void *addr)
16820+static inline void set_nmi_gate(int gate, const void *addr)
16821 {
16822 gate_desc s;
16823
16824@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16825 #endif
16826
16827 #ifdef CONFIG_TRACING
16828-extern struct desc_ptr trace_idt_descr;
16829-extern gate_desc trace_idt_table[];
16830+extern const struct desc_ptr trace_idt_descr;
16831+extern gate_desc trace_idt_table[IDT_ENTRIES];
16832 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16833 {
16834 write_idt_entry(trace_idt_table, entry, gate);
16835 }
16836
16837-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16838+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16839 unsigned dpl, unsigned ist, unsigned seg)
16840 {
16841 gate_desc s;
16842@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16843 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16844 #endif
16845
16846-static inline void _set_gate(int gate, unsigned type, void *addr,
16847+static inline void _set_gate(int gate, unsigned type, const void *addr,
16848 unsigned dpl, unsigned ist, unsigned seg)
16849 {
16850 gate_desc s;
16851@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16852 #define set_intr_gate(n, addr) \
16853 do { \
16854 BUG_ON((unsigned)n > 0xFF); \
16855- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16856+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16857 __KERNEL_CS); \
16858- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16859+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16860 0, 0, __KERNEL_CS); \
16861 } while (0)
16862
16863@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
16864 /*
16865 * This routine sets up an interrupt gate at directory privilege level 3.
16866 */
16867-static inline void set_system_intr_gate(unsigned int n, void *addr)
16868+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16869 {
16870 BUG_ON((unsigned)n > 0xFF);
16871 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16872 }
16873
16874-static inline void set_system_trap_gate(unsigned int n, void *addr)
16875+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16876 {
16877 BUG_ON((unsigned)n > 0xFF);
16878 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16879 }
16880
16881-static inline void set_trap_gate(unsigned int n, void *addr)
16882+static inline void set_trap_gate(unsigned int n, const void *addr)
16883 {
16884 BUG_ON((unsigned)n > 0xFF);
16885 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16886@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16887 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16888 {
16889 BUG_ON((unsigned)n > 0xFF);
16890- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16891+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16892 }
16893
16894-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16895+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16896 {
16897 BUG_ON((unsigned)n > 0xFF);
16898 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16899 }
16900
16901-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16902+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16903 {
16904 BUG_ON((unsigned)n > 0xFF);
16905 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16906@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
16907 else
16908 load_idt((const struct desc_ptr *)&idt_descr);
16909 }
16910+
16911+#ifdef CONFIG_X86_32
16912+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16913+{
16914+ struct desc_struct d;
16915+
16916+ if (likely(limit))
16917+ limit = (limit - 1UL) >> PAGE_SHIFT;
16918+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16919+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16920+}
16921+#endif
16922+
16923 #endif /* _ASM_X86_DESC_H */
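For reference, the pack_descriptor(&d, base, limit, 0xFB, 0xC) call in set_user_cs() above encodes a user code segment. A minimal userspace decode of those two constants (bit positions per the Intel SDM segment-descriptor layout; the meaning of pack_descriptor()'s parameters is assumed from its kernel definition, which is not shown in this hunk):

#include <stdio.h>

int main(void)
{
	unsigned access = 0xFB;	/* access byte passed as "type" */
	unsigned flags = 0xC;	/* high flags nibble */

	printf("P=%u DPL=%u S=%u type=%#x\n",
	       (access >> 7) & 1,	/* present */
	       (access >> 5) & 3,	/* DPL 3: user mode */
	       (access >> 4) & 1,	/* code/data, not a system segment */
	       access & 0xF);		/* 0xB: code, readable, accessed */
	printf("G=%u D=%u\n",
	       (flags >> 3) & 1,	/* granularity: limit in 4K pages */
	       (flags >> 2) & 1);	/* 32-bit default operand size */
	return 0;
}

With G=1 the limit field counts pages, which is why set_user_cs() first converts the byte limit with (limit - 1) >> PAGE_SHIFT.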
16924diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16925index 278441f..b95a174 100644
16926--- a/arch/x86/include/asm/desc_defs.h
16927+++ b/arch/x86/include/asm/desc_defs.h
16928@@ -31,6 +31,12 @@ struct desc_struct {
16929 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16930 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16931 };
16932+ struct {
16933+ u16 offset_low;
16934+ u16 seg;
16935+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16936+ unsigned offset_high: 16;
16937+ } gate;
16938 };
16939 } __attribute__((packed));
16940
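The gate view added above is what lets pack_gate() in desc.h (earlier hunk) assign named fields instead of the old shift/mask arithmetic. A small userspace check that the two encodings agree, assuming a little-endian target with GCC bitfield layout (a sketch, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct gate32 {
	union {
		struct { uint32_t a, b; };
		struct {
			uint16_t offset_low;
			uint16_t seg;
			unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
			unsigned offset_high: 16;
		} gate;
	};
};

int main(void)
{
	uint32_t base = 0xc1234567, seg = 0x60;	/* sample values */
	unsigned type = 0xe, dpl = 0;		/* interrupt gate, ring 0 */
	struct gate32 oldg, newg;

	/* the removed encoding from pack_gate() */
	oldg.a = (seg << 16) | (base & 0xffff);
	oldg.b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

	/* the new named-field encoding */
	newg.gate.offset_low = base;
	newg.gate.seg = seg;
	newg.gate.reserved = 0;
	newg.gate.type = type;
	newg.gate.s = 0;
	newg.gate.dpl = dpl;
	newg.gate.p = 1;
	newg.gate.offset_high = base >> 16;

	assert(oldg.a == newg.a && oldg.b == newg.b);
	printf("gate = %08x %08x\n", (unsigned)newg.b, (unsigned)newg.a);
	return 0;
}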
16941diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16942index ced283a..ffe04cc 100644
16943--- a/arch/x86/include/asm/div64.h
16944+++ b/arch/x86/include/asm/div64.h
16945@@ -39,7 +39,7 @@
16946 __mod; \
16947 })
16948
16949-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16950+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16951 {
16952 union {
16953 u64 v64;
16954diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16955index 1a055c8..a1701de 100644
16956--- a/arch/x86/include/asm/elf.h
16957+++ b/arch/x86/include/asm/elf.h
16958@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16959
16960 #include <asm/vdso.h>
16961
16962-#ifdef CONFIG_X86_64
16963-extern unsigned int vdso64_enabled;
16964-#endif
16965 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16966 extern unsigned int vdso32_enabled;
16967 #endif
16968@@ -248,7 +245,25 @@ extern int force_personality32;
16969 the loader. We need to make sure that it is out of the way of the program
16970 that it will "exec", and that there is sufficient room for the brk. */
16971
16972+#ifdef CONFIG_PAX_SEGMEXEC
16973+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16974+#else
16975 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16976+#endif
16977+
16978+#ifdef CONFIG_PAX_ASLR
16979+#ifdef CONFIG_X86_32
16980+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16981+
16982+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16983+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16984+#else
16985+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16986+
16987+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16988+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16989+#endif
16990+#endif
16991
16992 /* This yields a mask that user programs can use to figure out what
16993 instruction set this CPU supports. This could be done in user space,
16994@@ -297,17 +312,13 @@ do { \
16995
16996 #define ARCH_DLINFO \
16997 do { \
16998- if (vdso64_enabled) \
16999- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17000- (unsigned long __force)current->mm->context.vdso); \
17001+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17002 } while (0)
17003
17004 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
17005 #define ARCH_DLINFO_X32 \
17006 do { \
17007- if (vdso64_enabled) \
17008- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17009- (unsigned long __force)current->mm->context.vdso); \
17010+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17011 } while (0)
17012
17013 #define AT_SYSINFO 32
17014@@ -322,10 +333,10 @@ else \
17015
17016 #endif /* !CONFIG_X86_32 */
17017
17018-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
17019+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
17020
17021 #define VDSO_ENTRY \
17022- ((unsigned long)current->mm->context.vdso + \
17023+ (current->mm->context.vdso + \
17024 selected_vdso32->sym___kernel_vsyscall)
17025
17026 struct linux_binprm;
17027@@ -337,9 +348,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
17028 int uses_interp);
17029 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
17030
17031-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
17032-#define arch_randomize_brk arch_randomize_brk
17033-
17034 /*
17035 * True on X86_32 or when emulating IA32 on X86_64
17036 */
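The PAX_DELTA_* values above are bit counts of mmap/stack randomization applied at page granularity; elsewhere in this patch the delta is drawn as a random value of that many bits and shifted left by PAGE_SHIFT (that consumption formula is an assumption here, not shown in this hunk). Rough window sizes implied by the constants, as a sketch:

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long long delta_window(unsigned delta_bits)
{
	/* maximum displacement: ((2^bits) - 1) pages */
	return ((1ULL << delta_bits) - 1) << PAGE_SHIFT;
}

int main(void)
{
	printf("i386:     16 bits -> %llu MiB\n", delta_window(16) >> 20);
	printf("segmexec: 15 bits -> %llu MiB\n", delta_window(15) >> 20);
	/* x86_64 default: TASK_SIZE_MAX_SHIFT 47 - PAGE_SHIFT 12 - 3 = 32 bits */
	printf("x86_64:   32 bits -> %llu GiB\n", delta_window(32) >> 30);
	return 0;
}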
17037diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
17038index 77a99ac..39ff7f5 100644
17039--- a/arch/x86/include/asm/emergency-restart.h
17040+++ b/arch/x86/include/asm/emergency-restart.h
17041@@ -1,6 +1,6 @@
17042 #ifndef _ASM_X86_EMERGENCY_RESTART_H
17043 #define _ASM_X86_EMERGENCY_RESTART_H
17044
17045-extern void machine_emergency_restart(void);
17046+extern void machine_emergency_restart(void) __noreturn;
17047
17048 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
17049diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
17050index 1c7eefe..d0e4702 100644
17051--- a/arch/x86/include/asm/floppy.h
17052+++ b/arch/x86/include/asm/floppy.h
17053@@ -229,18 +229,18 @@ static struct fd_routine_l {
17054 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
17055 } fd_routine[] = {
17056 {
17057- request_dma,
17058- free_dma,
17059- get_dma_residue,
17060- dma_mem_alloc,
17061- hard_dma_setup
17062+ ._request_dma = request_dma,
17063+ ._free_dma = free_dma,
17064+ ._get_dma_residue = get_dma_residue,
17065+ ._dma_mem_alloc = dma_mem_alloc,
17066+ ._dma_setup = hard_dma_setup
17067 },
17068 {
17069- vdma_request_dma,
17070- vdma_nop,
17071- vdma_get_dma_residue,
17072- vdma_mem_alloc,
17073- vdma_dma_setup
17074+ ._request_dma = vdma_request_dma,
17075+ ._free_dma = vdma_nop,
17076+ ._get_dma_residue = vdma_get_dma_residue,
17077+ ._dma_mem_alloc = vdma_mem_alloc,
17078+ ._dma_setup = vdma_dma_setup
17079 }
17080 };
17081
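The fd_routine[] conversion above is purely to designated initializers; nothing behavioral changes. The motivation is that positional initializers silently shift if a member is added or reordered, which matters in a patch set that also randomizes structure layout. For example:

#include <stdio.h>

struct ops {
	int (*open)(void);
	int (*close)(void);	/* imagine this member was inserted later */
	int (*rw)(void);
};

static int my_open(void) { return 1; }
static int my_rw(void)   { return 3; }

/* positional: my_rw now silently lands in .close, with no compiler error */
static struct ops bad = { my_open, my_rw };

/* designated: matched by name, immune to reordering or insertion */
static struct ops good = { .open = my_open, .rw = my_rw };

int main(void)
{
	printf("bad.close == my_rw: %d\n", bad.close == my_rw);	/* 1 */
	printf("good.rw == my_rw:   %d\n", good.rw == my_rw);	/* 1 */
	return 0;
}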
17082diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
17083index 115e368..76ecf6c 100644
17084--- a/arch/x86/include/asm/fpu-internal.h
17085+++ b/arch/x86/include/asm/fpu-internal.h
17086@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17087 #define user_insn(insn, output, input...) \
17088 ({ \
17089 int err; \
17090+ pax_open_userland(); \
17091 asm volatile(ASM_STAC "\n" \
17092- "1:" #insn "\n\t" \
17093+ "1:" \
17094+ __copyuser_seg \
17095+ #insn "\n\t" \
17096 "2: " ASM_CLAC "\n" \
17097 ".section .fixup,\"ax\"\n" \
17098 "3: movl $-1,%[err]\n" \
17099@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17100 _ASM_EXTABLE(1b, 3b) \
17101 : [err] "=r" (err), output \
17102 : "0"(0), input); \
17103+ pax_close_userland(); \
17104 err; \
17105 })
17106
17107@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17108 "fnclex\n\t"
17109 "emms\n\t"
17110 "fildl %P[addr]" /* set F?P to defined value */
17111- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17112+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17113 }
17114
17115 return fpu_restore_checking(&tsk->thread.fpu);
17116diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17117index b4c1f54..e290c08 100644
17118--- a/arch/x86/include/asm/futex.h
17119+++ b/arch/x86/include/asm/futex.h
17120@@ -12,6 +12,7 @@
17121 #include <asm/smap.h>
17122
17123 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17124+ typecheck(u32 __user *, uaddr); \
17125 asm volatile("\t" ASM_STAC "\n" \
17126 "1:\t" insn "\n" \
17127 "2:\t" ASM_CLAC "\n" \
17128@@ -20,15 +21,16 @@
17129 "\tjmp\t2b\n" \
17130 "\t.previous\n" \
17131 _ASM_EXTABLE(1b, 3b) \
17132- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17133+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17134 : "i" (-EFAULT), "0" (oparg), "1" (0))
17135
17136 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17137+ typecheck(u32 __user *, uaddr); \
17138 asm volatile("\t" ASM_STAC "\n" \
17139 "1:\tmovl %2, %0\n" \
17140 "\tmovl\t%0, %3\n" \
17141 "\t" insn "\n" \
17142- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17143+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17144 "\tjnz\t1b\n" \
17145 "3:\t" ASM_CLAC "\n" \
17146 "\t.section .fixup,\"ax\"\n" \
17147@@ -38,7 +40,7 @@
17148 _ASM_EXTABLE(1b, 4b) \
17149 _ASM_EXTABLE(2b, 4b) \
17150 : "=&a" (oldval), "=&r" (ret), \
17151- "+m" (*uaddr), "=&r" (tem) \
17152+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17153 : "r" (oparg), "i" (-EFAULT), "1" (0))
17154
17155 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17156@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17157
17158 pagefault_disable();
17159
17160+ pax_open_userland();
17161 switch (op) {
17162 case FUTEX_OP_SET:
17163- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17164+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17165 break;
17166 case FUTEX_OP_ADD:
17167- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17168+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17169 uaddr, oparg);
17170 break;
17171 case FUTEX_OP_OR:
17172@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17173 default:
17174 ret = -ENOSYS;
17175 }
17176+ pax_close_userland();
17177
17178 pagefault_enable();
17179
17180diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17181index 4615906..788c817 100644
17182--- a/arch/x86/include/asm/hw_irq.h
17183+++ b/arch/x86/include/asm/hw_irq.h
17184@@ -164,8 +164,8 @@ extern void setup_ioapic_dest(void);
17185 extern void enable_IO_APIC(void);
17186
17187 /* Statistics */
17188-extern atomic_t irq_err_count;
17189-extern atomic_t irq_mis_count;
17190+extern atomic_unchecked_t irq_err_count;
17191+extern atomic_unchecked_t irq_mis_count;
17192
17193 /* EISA */
17194 extern void eisa_set_level_irq(unsigned int irq);
17195diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17196index a203659..9889f1c 100644
17197--- a/arch/x86/include/asm/i8259.h
17198+++ b/arch/x86/include/asm/i8259.h
17199@@ -62,7 +62,7 @@ struct legacy_pic {
17200 void (*init)(int auto_eoi);
17201 int (*irq_pending)(unsigned int irq);
17202 void (*make_irq)(unsigned int irq);
17203-};
17204+} __do_const;
17205
17206 extern struct legacy_pic *legacy_pic;
17207 extern struct legacy_pic null_legacy_pic;
17208diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17209index b8237d8..3e8864e 100644
17210--- a/arch/x86/include/asm/io.h
17211+++ b/arch/x86/include/asm/io.h
17212@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17213 "m" (*(volatile type __force *)addr) barrier); }
17214
17215 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17216-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17217-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17218+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17219+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17220
17221 build_mmio_read(__readb, "b", unsigned char, "=q", )
17222-build_mmio_read(__readw, "w", unsigned short, "=r", )
17223-build_mmio_read(__readl, "l", unsigned int, "=r", )
17224+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17225+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17226
17227 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17228 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17229@@ -109,7 +109,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17230 * this function
17231 */
17232
17233-static inline phys_addr_t virt_to_phys(volatile void *address)
17234+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17235 {
17236 return __pa(address);
17237 }
17238@@ -185,7 +185,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17239 return ioremap_nocache(offset, size);
17240 }
17241
17242-extern void iounmap(volatile void __iomem *addr);
17243+extern void iounmap(const volatile void __iomem *addr);
17244
17245 extern void set_iounmap_nonlazy(void);
17246
17247@@ -195,6 +195,17 @@ extern void set_iounmap_nonlazy(void);
17248
17249 #include <linux/vmalloc.h>
17250
17251+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17252+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17253+{
17254+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17255+}
17256+
17257+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17258+{
17259+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17260+}
17261+
17262 /*
17263 * Convert a virtual cached pointer to an uncached pointer
17264 */
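The new valid_phys_addr_range()/valid_mmap_phys_addr_range() bound /dev/mem-style accesses to what the CPU reports as physically addressable. Worked arithmetic for the read-side check, as a userspace sketch of the same expression:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long long addr, size_t count,
				 unsigned x86_phys_bits)
{
	/* highest page touched by [addr, addr + count) must be a valid pfn */
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (x86_phys_bits - PAGE_SHIFT));
}

int main(void)
{
	/* 36 physical address bits => pfns below 1 << 24 (64 GiB of RAM) */
	printf("%d\n", valid_phys_addr_range(0xffffffffULL, 4096, 36));  /* 1 */
	printf("%d\n", valid_phys_addr_range(0xfffffffffULL, 1, 36));    /* 0 */
	return 0;
}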
17265diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17266index 0a8b519..80e7d5b 100644
17267--- a/arch/x86/include/asm/irqflags.h
17268+++ b/arch/x86/include/asm/irqflags.h
17269@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17270 sti; \
17271 sysexit
17272
17273+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17274+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17275+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17276+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17277+
17278 #else
17279 #define INTERRUPT_RETURN iret
17280 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17281diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17282index 53cdfb2..d1369e6 100644
17283--- a/arch/x86/include/asm/kprobes.h
17284+++ b/arch/x86/include/asm/kprobes.h
17285@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17286 #define RELATIVEJUMP_SIZE 5
17287 #define RELATIVECALL_OPCODE 0xe8
17288 #define RELATIVE_ADDR_SIZE 4
17289-#define MAX_STACK_SIZE 64
17290-#define MIN_STACK_SIZE(ADDR) \
17291- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17292- THREAD_SIZE - (unsigned long)(ADDR))) \
17293- ? (MAX_STACK_SIZE) \
17294- : (((unsigned long)current_thread_info()) + \
17295- THREAD_SIZE - (unsigned long)(ADDR)))
17296+#define MAX_STACK_SIZE 64UL
17297+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17298
17299 #define flush_insn_slot(p) do { } while (0)
17300
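The rewritten MIN_STACK_SIZE() computes the same bound as the removed conditional: copy at most MAX_STACK_SIZE bytes, but never past the top of the kernel stack, now taken from thread.sp0 instead of the thread_info-relative computation. The arithmetic, as a sketch:

#include <stdio.h>

#define MAX_STACK_SIZE 64UL
#define min(a, b) ((a) < (b) ? (a) : (b))

static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
	return min(MAX_STACK_SIZE, sp0 - addr);	/* bytes from addr to stack top */
}

int main(void)
{
	unsigned long sp0 = 0x4000;	/* illustrative top-of-stack */

	printf("%lu\n", min_stack_size(sp0, sp0 - 200));	/* 64: capped */
	printf("%lu\n", min_stack_size(sp0, sp0 - 24));		/* 24: stack ends first */
	return 0;
}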
17301diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17302index 4ad6560..75c7bdd 100644
17303--- a/arch/x86/include/asm/local.h
17304+++ b/arch/x86/include/asm/local.h
17305@@ -10,33 +10,97 @@ typedef struct {
17306 atomic_long_t a;
17307 } local_t;
17308
17309+typedef struct {
17310+ atomic_long_unchecked_t a;
17311+} local_unchecked_t;
17312+
17313 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17314
17315 #define local_read(l) atomic_long_read(&(l)->a)
17316+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17317 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17318+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17319
17320 static inline void local_inc(local_t *l)
17321 {
17322- asm volatile(_ASM_INC "%0"
17323+ asm volatile(_ASM_INC "%0\n"
17324+
17325+#ifdef CONFIG_PAX_REFCOUNT
17326+ "jno 0f\n"
17327+ _ASM_DEC "%0\n"
17328+ "int $4\n0:\n"
17329+ _ASM_EXTABLE(0b, 0b)
17330+#endif
17331+
17332+ : "+m" (l->a.counter));
17333+}
17334+
17335+static inline void local_inc_unchecked(local_unchecked_t *l)
17336+{
17337+ asm volatile(_ASM_INC "%0\n"
17338 : "+m" (l->a.counter));
17339 }
17340
17341 static inline void local_dec(local_t *l)
17342 {
17343- asm volatile(_ASM_DEC "%0"
17344+ asm volatile(_ASM_DEC "%0\n"
17345+
17346+#ifdef CONFIG_PAX_REFCOUNT
17347+ "jno 0f\n"
17348+ _ASM_INC "%0\n"
17349+ "int $4\n0:\n"
17350+ _ASM_EXTABLE(0b, 0b)
17351+#endif
17352+
17353+ : "+m" (l->a.counter));
17354+}
17355+
17356+static inline void local_dec_unchecked(local_unchecked_t *l)
17357+{
17358+ asm volatile(_ASM_DEC "%0\n"
17359 : "+m" (l->a.counter));
17360 }
17361
17362 static inline void local_add(long i, local_t *l)
17363 {
17364- asm volatile(_ASM_ADD "%1,%0"
17365+ asm volatile(_ASM_ADD "%1,%0\n"
17366+
17367+#ifdef CONFIG_PAX_REFCOUNT
17368+ "jno 0f\n"
17369+ _ASM_SUB "%1,%0\n"
17370+ "int $4\n0:\n"
17371+ _ASM_EXTABLE(0b, 0b)
17372+#endif
17373+
17374+ : "+m" (l->a.counter)
17375+ : "ir" (i));
17376+}
17377+
17378+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17379+{
17380+ asm volatile(_ASM_ADD "%1,%0\n"
17381 : "+m" (l->a.counter)
17382 : "ir" (i));
17383 }
17384
17385 static inline void local_sub(long i, local_t *l)
17386 {
17387- asm volatile(_ASM_SUB "%1,%0"
17388+ asm volatile(_ASM_SUB "%1,%0\n"
17389+
17390+#ifdef CONFIG_PAX_REFCOUNT
17391+ "jno 0f\n"
17392+ _ASM_ADD "%1,%0\n"
17393+ "int $4\n0:\n"
17394+ _ASM_EXTABLE(0b, 0b)
17395+#endif
17396+
17397+ : "+m" (l->a.counter)
17398+ : "ir" (i));
17399+}
17400+
17401+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17402+{
17403+ asm volatile(_ASM_SUB "%1,%0\n"
17404 : "+m" (l->a.counter)
17405 : "ir" (i));
17406 }
17407@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17408 */
17409 static inline int local_sub_and_test(long i, local_t *l)
17410 {
17411- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17412+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17413 }
17414
17415 /**
17416@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17417 */
17418 static inline int local_dec_and_test(local_t *l)
17419 {
17420- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17421+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17422 }
17423
17424 /**
17425@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17426 */
17427 static inline int local_inc_and_test(local_t *l)
17428 {
17429- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17430+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17431 }
17432
17433 /**
17434@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17435 */
17436 static inline int local_add_negative(long i, local_t *l)
17437 {
17438- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17439+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17440 }
17441
17442 /**
17443@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17444 static inline long local_add_return(long i, local_t *l)
17445 {
17446 long __i = i;
17447+ asm volatile(_ASM_XADD "%0, %1\n"
17448+
17449+#ifdef CONFIG_PAX_REFCOUNT
17450+ "jno 0f\n"
17451+ _ASM_MOV "%0,%1\n"
17452+ "int $4\n0:\n"
17453+ _ASM_EXTABLE(0b, 0b)
17454+#endif
17455+
17456+ : "+r" (i), "+m" (l->a.counter)
17457+ : : "memory");
17458+ return i + __i;
17459+}
17460+
17461+/**
17462+ * local_add_return_unchecked - add and return
17463+ * @i: integer value to add
17464+ * @l: pointer to type local_unchecked_t
17465+ *
17466+ * Atomically adds @i to @l and returns @i + @l
17467+ */
17468+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17469+{
17470+ long __i = i;
17471 asm volatile(_ASM_XADD "%0, %1;"
17472 : "+r" (i), "+m" (l->a.counter)
17473 : : "memory");
17474@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17475
17476 #define local_cmpxchg(l, o, n) \
17477 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17478+#define local_cmpxchg_unchecked(l, o, n) \
17479+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17480 /* Always has a lock prefix */
17481 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17482
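All the PAX_REFCOUNT additions above follow one pattern: perform the operation, "jno" over the recovery path if no signed overflow occurred, otherwise undo the operation and raise #OF with "int $4" (the _ASM_EXTABLE entry resumes execution at the 0: label after the handler reports). A userspace rendition of that control flow for x86-64 GCC, branching to a C label instead of trapping:

#include <limits.h>
#include <stdio.h>

static long checked_inc(long *vp, int *overflowed)
{
	*overflowed = 0;
	/* input-"m" plus "memory" clobber: the pre-GCC-11 asm-goto idiom for
	   modifying memory, since asm goto cannot have output operands */
	asm goto("incq %0\n\t"
		 "jno %l[ok]\n\t"
		 "decq %0\n\t"		/* undo, as the kernel version does */
		 "jmp %l[of]"
		 : : "m" (*vp) : "cc", "memory" : ok, of);
of:
	*overflowed = 1;
ok:
	return *vp;
}

int main(void)
{
	int of;
	long a = 41, b = LONG_MAX;
	long r;

	r = checked_inc(&a, &of);
	printf("%ld of=%d\n", r, of);	/* 42 of=0 */
	r = checked_inc(&b, &of);
	printf("%ld of=%d\n", r, of);	/* LONG_MAX of=1 */
	return 0;
}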
17483diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17484new file mode 100644
17485index 0000000..2bfd3ba
17486--- /dev/null
17487+++ b/arch/x86/include/asm/mman.h
17488@@ -0,0 +1,15 @@
17489+#ifndef _X86_MMAN_H
17490+#define _X86_MMAN_H
17491+
17492+#include <uapi/asm/mman.h>
17493+
17494+#ifdef __KERNEL__
17495+#ifndef __ASSEMBLY__
17496+#ifdef CONFIG_X86_32
17497+#define arch_mmap_check i386_mmap_check
17498+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17499+#endif
17500+#endif
17501+#endif
17502+
17503+#endif /* X86_MMAN_H */
17504diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17505index 876e74e..e20bfb1 100644
17506--- a/arch/x86/include/asm/mmu.h
17507+++ b/arch/x86/include/asm/mmu.h
17508@@ -9,7 +9,7 @@
17509 * we put the segment information here.
17510 */
17511 typedef struct {
17512- void *ldt;
17513+ struct desc_struct *ldt;
17514 int size;
17515
17516 #ifdef CONFIG_X86_64
17517@@ -18,7 +18,19 @@ typedef struct {
17518 #endif
17519
17520 struct mutex lock;
17521- void __user *vdso;
17522+ unsigned long vdso;
17523+
17524+#ifdef CONFIG_X86_32
17525+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17526+ unsigned long user_cs_base;
17527+ unsigned long user_cs_limit;
17528+
17529+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17530+ cpumask_t cpu_user_cs_mask;
17531+#endif
17532+
17533+#endif
17534+#endif
17535 } mm_context_t;
17536
17537 #ifdef CONFIG_SMP
17538diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17539index be12c53..07fd3ca 100644
17540--- a/arch/x86/include/asm/mmu_context.h
17541+++ b/arch/x86/include/asm/mmu_context.h
17542@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
17543
17544 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17545 {
17546+
17547+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17548+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17549+ unsigned int i;
17550+ pgd_t *pgd;
17551+
17552+ pax_open_kernel();
17553+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17554+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17555+ set_pgd_batched(pgd+i, native_make_pgd(0));
17556+ pax_close_kernel();
17557+ }
17558+#endif
17559+
17560 #ifdef CONFIG_SMP
17561 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17562 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17563@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17564 struct task_struct *tsk)
17565 {
17566 unsigned cpu = smp_processor_id();
17567+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17568+ int tlbstate = TLBSTATE_OK;
17569+#endif
17570
17571 if (likely(prev != next)) {
17572 #ifdef CONFIG_SMP
17573+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17574+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17575+#endif
17576 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17577 this_cpu_write(cpu_tlbstate.active_mm, next);
17578 #endif
17579 cpumask_set_cpu(cpu, mm_cpumask(next));
17580
17581 /* Re-load page tables */
17582+#ifdef CONFIG_PAX_PER_CPU_PGD
17583+ pax_open_kernel();
17584+
17585+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17586+ if (static_cpu_has(X86_FEATURE_PCID))
17587+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17588+ else
17589+#endif
17590+
17591+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17592+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17593+ pax_close_kernel();
17594+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17595+
17596+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17597+ if (static_cpu_has(X86_FEATURE_PCID)) {
17598+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17599+ u64 descriptor[2];
17600+ descriptor[0] = PCID_USER;
17601+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17602+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17603+ descriptor[0] = PCID_KERNEL;
17604+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17605+ }
17606+ } else {
17607+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17608+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17609+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17610+ else
17611+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17612+ }
17613+ } else
17614+#endif
17615+
17616+ load_cr3(get_cpu_pgd(cpu, kernel));
17617+#else
17618 load_cr3(next->pgd);
17619+#endif
17620
17621 /* Stop flush ipis for the previous mm */
17622 cpumask_clear_cpu(cpu, mm_cpumask(prev));
17623@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17624 /* Load the LDT, if the LDT is different: */
17625 if (unlikely(prev->context.ldt != next->context.ldt))
17626 load_LDT_nolock(&next->context);
17627+
17628+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17629+ if (!(__supported_pte_mask & _PAGE_NX)) {
17630+ smp_mb__before_atomic();
17631+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17632+ smp_mb__after_atomic();
17633+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17634+ }
17635+#endif
17636+
17637+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17638+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17639+ prev->context.user_cs_limit != next->context.user_cs_limit))
17640+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17641+#ifdef CONFIG_SMP
17642+ else if (unlikely(tlbstate != TLBSTATE_OK))
17643+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17644+#endif
17645+#endif
17646+
17647 }
17648+ else {
17649+
17650+#ifdef CONFIG_PAX_PER_CPU_PGD
17651+ pax_open_kernel();
17652+
17653+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17654+ if (static_cpu_has(X86_FEATURE_PCID))
17655+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17656+ else
17657+#endif
17658+
17659+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17660+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17661+ pax_close_kernel();
17662+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17663+
17664+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17665+ if (static_cpu_has(X86_FEATURE_PCID)) {
17666+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17667+ u64 descriptor[2];
17668+ descriptor[0] = PCID_USER;
17669+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17670+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17671+ descriptor[0] = PCID_KERNEL;
17672+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17673+ }
17674+ } else {
17675+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17676+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17677+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17678+ else
17679+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17680+ }
17681+ } else
17682+#endif
17683+
17684+ load_cr3(get_cpu_pgd(cpu, kernel));
17685+#endif
17686+
17687 #ifdef CONFIG_SMP
17688- else {
17689 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17690 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17691
17692@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17693 * tlb flush IPI delivery. We must reload CR3
17694 * to make sure to use no freed page tables.
17695 */
17696+
17697+#ifndef CONFIG_PAX_PER_CPU_PGD
17698 load_cr3(next->pgd);
17699+#endif
17700+
17701 load_LDT_nolock(&next->context);
17702+
17703+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17704+ if (!(__supported_pte_mask & _PAGE_NX))
17705+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17706+#endif
17707+
17708+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17709+#ifdef CONFIG_PAX_PAGEEXEC
17710+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17711+#endif
17712+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17713+#endif
17714+
17715 }
17716+#endif
17717 }
17718-#endif
17719 }
17720
17721 #define activate_mm(prev, next) \
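The INVPCID path above builds a 16-byte descriptor and invalidates one PCID's TLB entries without a CR3 write. Per the SDM, the descriptor holds the PCID in bits 0..11 and a linear address in bits 64..127 (ignored for the single-context type). A sketch of the same operand binding; the PCID_USER value here is illustrative (the real constants are defined elsewhere in this patch), and INVPCID executes only at CPL 0, so the helper is defined but not called:

#include <stdint.h>
#include <stdio.h>

#define PCID_USER 1ULL			/* illustrative value only */
#define INVPCID_SINGLE_CONTEXT 1UL	/* SDM invalidation type 1 */

static inline void invpcid_single_context(uint64_t pcid)
{
	uint64_t descriptor[2] = { pcid, 0 };	/* { pcid, linear address } */

	asm volatile("invpcid (%0), %1"
		     : : "r" (descriptor), "r" (INVPCID_SINGLE_CONTEXT)
		     : "memory");
}

int main(void)
{
	uint64_t descriptor[2] = { PCID_USER, 0 };

	printf("descriptor = { %#llx, %#llx }\n",
	       (unsigned long long)descriptor[0],
	       (unsigned long long)descriptor[1]);
	(void)invpcid_single_context;	/* CPL0-only: defined, not executed */
	return 0;
}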
17722diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17723index e3b7819..b257c64 100644
17724--- a/arch/x86/include/asm/module.h
17725+++ b/arch/x86/include/asm/module.h
17726@@ -5,6 +5,7 @@
17727
17728 #ifdef CONFIG_X86_64
17729 /* X86_64 does not define MODULE_PROC_FAMILY */
17730+#define MODULE_PROC_FAMILY ""
17731 #elif defined CONFIG_M486
17732 #define MODULE_PROC_FAMILY "486 "
17733 #elif defined CONFIG_M586
17734@@ -57,8 +58,20 @@
17735 #error unknown processor family
17736 #endif
17737
17738-#ifdef CONFIG_X86_32
17739-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17740+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17741+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17742+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17743+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17744+#else
17745+#define MODULE_PAX_KERNEXEC ""
17746 #endif
17747
17748+#ifdef CONFIG_PAX_MEMORY_UDEREF
17749+#define MODULE_PAX_UDEREF "UDEREF "
17750+#else
17751+#define MODULE_PAX_UDEREF ""
17752+#endif
17753+
17754+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17755+
17756 #endif /* _ASM_X86_MODULE_H */
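The net effect of the module.h hunk: vermagic now encodes the KERNEXEC method and UDEREF state, so a module built for a differently hardened kernel is refused at load time. For example, on x86_64 (empty MODULE_PROC_FAMILY) with the OR plugin method and UDEREF enabled:

#include <stdio.h>

#define MODULE_PROC_FAMILY ""
#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
#define MODULE_PAX_UDEREF "UDEREF "
#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	printf("arch vermagic: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	/* prints: arch vermagic: "KERNEXEC_OR UDEREF " */
	return 0;
}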
17757diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17758index 5f2fc44..106caa6 100644
17759--- a/arch/x86/include/asm/nmi.h
17760+++ b/arch/x86/include/asm/nmi.h
17761@@ -36,26 +36,35 @@ enum {
17762
17763 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17764
17765+struct nmiaction;
17766+
17767+struct nmiwork {
17768+ const struct nmiaction *action;
17769+ u64 max_duration;
17770+ struct irq_work irq_work;
17771+};
17772+
17773 struct nmiaction {
17774 struct list_head list;
17775 nmi_handler_t handler;
17776- u64 max_duration;
17777- struct irq_work irq_work;
17778 unsigned long flags;
17779 const char *name;
17780-};
17781+ struct nmiwork *work;
17782+} __do_const;
17783
17784 #define register_nmi_handler(t, fn, fg, n, init...) \
17785 ({ \
17786- static struct nmiaction init fn##_na = { \
17787+ static struct nmiwork fn##_nw; \
17788+ static const struct nmiaction init fn##_na = { \
17789 .handler = (fn), \
17790 .name = (n), \
17791 .flags = (fg), \
17792+ .work = &fn##_nw, \
17793 }; \
17794 __register_nmi_handler((t), &fn##_na); \
17795 })
17796
17797-int __register_nmi_handler(unsigned int, struct nmiaction *);
17798+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17799
17800 void unregister_nmi_handler(unsigned int, const char *);
17801
17802diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17803index 775873d..04cd306 100644
17804--- a/arch/x86/include/asm/page.h
17805+++ b/arch/x86/include/asm/page.h
17806@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17807 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17808
17809 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17810+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17811
17812 #define __boot_va(x) __va(x)
17813 #define __boot_pa(x) __pa(x)
17814@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17815 * virt_to_page(kaddr) returns a valid pointer if and only if
17816 * virt_addr_valid(kaddr) returns true.
17817 */
17818-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17819 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17820 extern bool __virt_addr_valid(unsigned long kaddr);
17821 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17822
17823+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17824+#define virt_to_page(kaddr) \
17825+ ({ \
17826+ const void *__kaddr = (const void *)(kaddr); \
17827+ BUG_ON(!virt_addr_valid(__kaddr)); \
17828+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17829+ })
17830+#else
17831+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17832+#endif
17833+
17834 #endif /* __ASSEMBLY__ */
17835
17836 #include <asm-generic/memory_model.h>
17837diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17838index 0f1ddee..e2fc3d1 100644
17839--- a/arch/x86/include/asm/page_64.h
17840+++ b/arch/x86/include/asm/page_64.h
17841@@ -7,9 +7,9 @@
17842
17843 /* duplicated to the one in bootmem.h */
17844 extern unsigned long max_pfn;
17845-extern unsigned long phys_base;
17846+extern const unsigned long phys_base;
17847
17848-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17849+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17850 {
17851 unsigned long y = x - __START_KERNEL_map;
17852
17853diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17854index cd6e1610..70f4418 100644
17855--- a/arch/x86/include/asm/paravirt.h
17856+++ b/arch/x86/include/asm/paravirt.h
17857@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17858 return (pmd_t) { ret };
17859 }
17860
17861-static inline pmdval_t pmd_val(pmd_t pmd)
17862+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17863 {
17864 pmdval_t ret;
17865
17866@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17867 val);
17868 }
17869
17870+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17871+{
17872+ pgdval_t val = native_pgd_val(pgd);
17873+
17874+ if (sizeof(pgdval_t) > sizeof(long))
17875+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17876+ val, (u64)val >> 32);
17877+ else
17878+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17879+ val);
17880+}
17881+
17882 static inline void pgd_clear(pgd_t *pgdp)
17883 {
17884 set_pgd(pgdp, __pgd(0));
17885@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17886 pv_mmu_ops.set_fixmap(idx, phys, flags);
17887 }
17888
17889+#ifdef CONFIG_PAX_KERNEXEC
17890+static inline unsigned long pax_open_kernel(void)
17891+{
17892+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17893+}
17894+
17895+static inline unsigned long pax_close_kernel(void)
17896+{
17897+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17898+}
17899+#else
17900+static inline unsigned long pax_open_kernel(void) { return 0; }
17901+static inline unsigned long pax_close_kernel(void) { return 0; }
17902+#endif
17903+
17904 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17905
17906 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17907@@ -906,7 +933,7 @@ extern void default_banner(void);
17908
17909 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17910 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17911-#define PARA_INDIRECT(addr) *%cs:addr
17912+#define PARA_INDIRECT(addr) *%ss:addr
17913 #endif
17914
17915 #define INTERRUPT_RETURN \
17916@@ -981,6 +1008,21 @@ extern void default_banner(void);
17917 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17918 CLBR_NONE, \
17919 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17920+
17921+#define GET_CR0_INTO_RDI \
17922+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17923+ mov %rax,%rdi
17924+
17925+#define SET_RDI_INTO_CR0 \
17926+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17927+
17928+#define GET_CR3_INTO_RDI \
17929+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17930+ mov %rax,%rdi
17931+
17932+#define SET_RDI_INTO_CR3 \
17933+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17934+
17935 #endif /* CONFIG_X86_32 */
17936
17937 #endif /* __ASSEMBLY__ */
17938diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17939index 7549b8b..f0edfda 100644
17940--- a/arch/x86/include/asm/paravirt_types.h
17941+++ b/arch/x86/include/asm/paravirt_types.h
17942@@ -84,7 +84,7 @@ struct pv_init_ops {
17943 */
17944 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17945 unsigned long addr, unsigned len);
17946-};
17947+} __no_const __no_randomize_layout;
17948
17949
17950 struct pv_lazy_ops {
17951@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17952 void (*enter)(void);
17953 void (*leave)(void);
17954 void (*flush)(void);
17955-};
17956+} __no_randomize_layout;
17957
17958 struct pv_time_ops {
17959 unsigned long long (*sched_clock)(void);
17960 unsigned long long (*steal_clock)(int cpu);
17961 unsigned long (*get_tsc_khz)(void);
17962-};
17963+} __no_const __no_randomize_layout;
17964
17965 struct pv_cpu_ops {
17966 /* hooks for various privileged instructions */
17967@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17968
17969 void (*start_context_switch)(struct task_struct *prev);
17970 void (*end_context_switch)(struct task_struct *next);
17971-};
17972+} __no_const __no_randomize_layout;
17973
17974 struct pv_irq_ops {
17975 /*
17976@@ -215,7 +215,7 @@ struct pv_irq_ops {
17977 #ifdef CONFIG_X86_64
17978 void (*adjust_exception_frame)(void);
17979 #endif
17980-};
17981+} __no_randomize_layout;
17982
17983 struct pv_apic_ops {
17984 #ifdef CONFIG_X86_LOCAL_APIC
17985@@ -223,7 +223,7 @@ struct pv_apic_ops {
17986 unsigned long start_eip,
17987 unsigned long start_esp);
17988 #endif
17989-};
17990+} __no_const __no_randomize_layout;
17991
17992 struct pv_mmu_ops {
17993 unsigned long (*read_cr2)(void);
17994@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17995 struct paravirt_callee_save make_pud;
17996
17997 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17998+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17999 #endif /* PAGETABLE_LEVELS == 4 */
18000 #endif /* PAGETABLE_LEVELS >= 3 */
18001
18002@@ -324,7 +325,13 @@ struct pv_mmu_ops {
18003 an mfn. We can tell which is which from the index. */
18004 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
18005 phys_addr_t phys, pgprot_t flags);
18006-};
18007+
18008+#ifdef CONFIG_PAX_KERNEXEC
18009+ unsigned long (*pax_open_kernel)(void);
18010+ unsigned long (*pax_close_kernel)(void);
18011+#endif
18012+
18013+} __no_randomize_layout;
18014
18015 struct arch_spinlock;
18016 #ifdef CONFIG_SMP
18017@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
18018 struct pv_lock_ops {
18019 struct paravirt_callee_save lock_spinning;
18020 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
18021-};
18022+} __no_randomize_layout;
18023
18024 /* This contains all the paravirt structures: we get a convenient
18025 * number for each function using the offset which we use to indicate
18026- * what to patch. */
18027+ * what to patch.
18028+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
18029+ */
18030+
18031 struct paravirt_patch_template {
18032 struct pv_init_ops pv_init_ops;
18033 struct pv_time_ops pv_time_ops;
18034@@ -349,7 +359,7 @@ struct paravirt_patch_template {
18035 struct pv_apic_ops pv_apic_ops;
18036 struct pv_mmu_ops pv_mmu_ops;
18037 struct pv_lock_ops pv_lock_ops;
18038-};
18039+} __no_randomize_layout;
18040
18041 extern struct pv_info pv_info;
18042 extern struct pv_init_ops pv_init_ops;
18043diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
18044index c4412e9..90e88c5 100644
18045--- a/arch/x86/include/asm/pgalloc.h
18046+++ b/arch/x86/include/asm/pgalloc.h
18047@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
18048 pmd_t *pmd, pte_t *pte)
18049 {
18050 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18051+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
18052+}
18053+
18054+static inline void pmd_populate_user(struct mm_struct *mm,
18055+ pmd_t *pmd, pte_t *pte)
18056+{
18057+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18058 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
18059 }
18060
18061@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
18062
18063 #ifdef CONFIG_X86_PAE
18064 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
18065+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18066+{
18067+ pud_populate(mm, pudp, pmd);
18068+}
18069 #else /* !CONFIG_X86_PAE */
18070 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18071 {
18072 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18073 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
18074 }
18075+
18076+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18077+{
18078+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18079+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18080+}
18081 #endif /* CONFIG_X86_PAE */
18082
18083 #if PAGETABLE_LEVELS > 3
18084@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18085 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18086 }
18087
18088+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18089+{
18090+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18091+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18092+}
18093+
18094 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18095 {
18096 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
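The *_populate_kernel() variants added above differ from the stock helpers only in the flag set: _KERNPG_TABLE is _PAGE_TABLE without _PAGE_USER, so page tables wired up this way are never reachable from user mode. With the usual x86 flag values (from pgtable_types.h, not shown in this hunk):

#include <stdio.h>

#define _PAGE_PRESENT  0x001
#define _PAGE_RW       0x002
#define _PAGE_USER     0x004
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY    0x040

#define _PAGE_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
		       _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | \
		       _PAGE_ACCESSED | _PAGE_DIRTY)

int main(void)
{
	printf("_PAGE_TABLE   = %#x (user-visible)\n", _PAGE_TABLE);	/* 0x67 */
	printf("_KERNPG_TABLE = %#x (kernel-only)\n", _KERNPG_TABLE);	/* 0x63 */
	return 0;
}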
18097diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18098index 206a87f..1623b06 100644
18099--- a/arch/x86/include/asm/pgtable-2level.h
18100+++ b/arch/x86/include/asm/pgtable-2level.h
18101@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18102
18103 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18104 {
18105+ pax_open_kernel();
18106 *pmdp = pmd;
18107+ pax_close_kernel();
18108 }
18109
18110 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18111diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18112index 81bb91b..9392125 100644
18113--- a/arch/x86/include/asm/pgtable-3level.h
18114+++ b/arch/x86/include/asm/pgtable-3level.h
18115@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18116
18117 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18118 {
18119+ pax_open_kernel();
18120 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18121+ pax_close_kernel();
18122 }
18123
18124 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18125 {
18126+ pax_open_kernel();
18127 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18128+ pax_close_kernel();
18129 }
18130
18131 /*
18132diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18133index 0ec0560..5dc64bd 100644
18134--- a/arch/x86/include/asm/pgtable.h
18135+++ b/arch/x86/include/asm/pgtable.h
18136@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18137
18138 #ifndef __PAGETABLE_PUD_FOLDED
18139 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18140+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18141 #define pgd_clear(pgd) native_pgd_clear(pgd)
18142 #endif
18143
18144@@ -83,12 +84,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18145
18146 #define arch_end_context_switch(prev) do {} while(0)
18147
18148+#define pax_open_kernel() native_pax_open_kernel()
18149+#define pax_close_kernel() native_pax_close_kernel()
18150 #endif /* CONFIG_PARAVIRT */
18151
18152+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18153+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18154+
18155+#ifdef CONFIG_PAX_KERNEXEC
18156+static inline unsigned long native_pax_open_kernel(void)
18157+{
18158+ unsigned long cr0;
18159+
18160+ preempt_disable();
18161+ barrier();
18162+ cr0 = read_cr0() ^ X86_CR0_WP;
18163+ BUG_ON(cr0 & X86_CR0_WP);
18164+ write_cr0(cr0);
18165+ return cr0 ^ X86_CR0_WP;
18166+}
18167+
18168+static inline unsigned long native_pax_close_kernel(void)
18169+{
18170+ unsigned long cr0;
18171+
18172+ cr0 = read_cr0() ^ X86_CR0_WP;
18173+ BUG_ON(!(cr0 & X86_CR0_WP));
18174+ write_cr0(cr0);
18175+ barrier();
18176+ preempt_enable_no_resched();
18177+ return cr0 ^ X86_CR0_WP;
18178+}
18179+#else
18180+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18181+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18182+#endif
18183+
18184 /*
18185 * The following only work if pte_present() is true.
18186 * Undefined behaviour if not..
18187 */
18188+static inline int pte_user(pte_t pte)
18189+{
18190+ return pte_val(pte) & _PAGE_USER;
18191+}
18192+
18193 static inline int pte_dirty(pte_t pte)
18194 {
18195 return pte_flags(pte) & _PAGE_DIRTY;
18196@@ -150,6 +190,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18197 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18198 }
18199
18200+static inline unsigned long pgd_pfn(pgd_t pgd)
18201+{
18202+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18203+}
18204+
18205 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18206
18207 static inline int pmd_large(pmd_t pte)
18208@@ -203,9 +248,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18209 return pte_clear_flags(pte, _PAGE_RW);
18210 }
18211
18212+static inline pte_t pte_mkread(pte_t pte)
18213+{
18214+ return __pte(pte_val(pte) | _PAGE_USER);
18215+}
18216+
18217 static inline pte_t pte_mkexec(pte_t pte)
18218 {
18219- return pte_clear_flags(pte, _PAGE_NX);
18220+#ifdef CONFIG_X86_PAE
18221+ if (__supported_pte_mask & _PAGE_NX)
18222+ return pte_clear_flags(pte, _PAGE_NX);
18223+ else
18224+#endif
18225+ return pte_set_flags(pte, _PAGE_USER);
18226+}
18227+
18228+static inline pte_t pte_exprotect(pte_t pte)
18229+{
18230+#ifdef CONFIG_X86_PAE
18231+ if (__supported_pte_mask & _PAGE_NX)
18232+ return pte_set_flags(pte, _PAGE_NX);
18233+ else
18234+#endif
18235+ return pte_clear_flags(pte, _PAGE_USER);
18236 }
18237
18238 static inline pte_t pte_mkdirty(pte_t pte)
18239@@ -435,6 +500,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18240 #endif
18241
18242 #ifndef __ASSEMBLY__
18243+
18244+#ifdef CONFIG_PAX_PER_CPU_PGD
18245+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18246+enum cpu_pgd_type {kernel = 0, user = 1};
18247+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18248+{
18249+ return cpu_pgd[cpu][type];
18250+}
18251+#endif
18252+
18253 #include <linux/mm_types.h>
18254 #include <linux/mmdebug.h>
18255 #include <linux/log2.h>
18256@@ -581,7 +656,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18257 * Currently stuck as a macro due to indirect forward reference to
18258 * linux/mmzone.h's __section_mem_map_addr() definition:
18259 */
18260-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18261+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18262
18263 /* Find an entry in the second-level page table.. */
18264 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18265@@ -621,7 +696,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18266 * Currently stuck as a macro due to indirect forward reference to
18267 * linux/mmzone.h's __section_mem_map_addr() definition:
18268 */
18269-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18270+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18271
18272 /* to find an entry in a page-table-directory. */
18273 static inline unsigned long pud_index(unsigned long address)
18274@@ -636,7 +711,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18275
18276 static inline int pgd_bad(pgd_t pgd)
18277 {
18278- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18279+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18280 }
18281
18282 static inline int pgd_none(pgd_t pgd)
18283@@ -659,7 +734,12 @@ static inline int pgd_none(pgd_t pgd)
18284 * pgd_offset() returns a (pgd_t *)
18285 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18286 */
18287-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18288+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18289+
18290+#ifdef CONFIG_PAX_PER_CPU_PGD
18291+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18292+#endif
18293+
18294 /*
18295 * a shortcut which implies the use of the kernel's pgd, instead
18296 * of a process's
18297@@ -670,6 +750,23 @@ static inline int pgd_none(pgd_t pgd)
18298 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18299 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18300
18301+#ifdef CONFIG_X86_32
18302+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18303+#else
18304+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18305+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18306+
18307+#ifdef CONFIG_PAX_MEMORY_UDEREF
18308+#ifdef __ASSEMBLY__
18309+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18310+#else
18311+extern unsigned long pax_user_shadow_base;
18312+extern pgdval_t clone_pgd_mask;
18313+#endif
18314+#endif
18315+
18316+#endif
18317+
18318 #ifndef __ASSEMBLY__
18319
18320 extern int direct_gbpages;
18321@@ -836,11 +933,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18322 * dst and src can be on the same page, but the range must not overlap,
18323 * and must not cross a page boundary.
18324 */
18325-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18326+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18327 {
18328- memcpy(dst, src, count * sizeof(pgd_t));
18329+ pax_open_kernel();
18330+ while (count--)
18331+ *dst++ = *src++;
18332+ pax_close_kernel();
18333 }
18334
18335+#ifdef CONFIG_PAX_PER_CPU_PGD
18336+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18337+#endif
18338+
18339+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18340+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18341+#else
18342+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18343+#endif
18344+
18345 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18346 static inline int page_level_shift(enum pg_level level)
18347 {
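native_pax_open_kernel()/native_pax_close_kernel() above implement the write window used throughout this patch (the IDT/GDT writes in desc.h, the pmd/pud setters below): clear CR0.WP with preemption disabled, store into otherwise read-only memory, restore WP. A userspace analogue of the same pattern, with mprotect() standing in for the CR0.WP toggle:

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* stand-in for a read-only kernel table */
	char *table = mmap(NULL, pagesz, PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(table != MAP_FAILED);

	assert(mprotect(table, pagesz, PROT_READ | PROT_WRITE) == 0); /* "open" */
	memcpy(table, "new entry", 10);
	assert(mprotect(table, pagesz, PROT_READ) == 0);              /* "close" */

	printf("%s\n", table);
	munmap(table, pagesz);
	return 0;
}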
18348diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18349index 9ee3221..b979c6b 100644
18350--- a/arch/x86/include/asm/pgtable_32.h
18351+++ b/arch/x86/include/asm/pgtable_32.h
18352@@ -25,9 +25,6 @@
18353 struct mm_struct;
18354 struct vm_area_struct;
18355
18356-extern pgd_t swapper_pg_dir[1024];
18357-extern pgd_t initial_page_table[1024];
18358-
18359 static inline void pgtable_cache_init(void) { }
18360 static inline void check_pgt_cache(void) { }
18361 void paging_init(void);
18362@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18363 # include <asm/pgtable-2level.h>
18364 #endif
18365
18366+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18367+extern pgd_t initial_page_table[PTRS_PER_PGD];
18368+#ifdef CONFIG_X86_PAE
18369+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18370+#endif
18371+
18372 #if defined(CONFIG_HIGHPTE)
18373 #define pte_offset_map(dir, address) \
18374 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18375@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18376 /* Clear a kernel PTE and flush it from the TLB */
18377 #define kpte_clear_flush(ptep, vaddr) \
18378 do { \
18379+ pax_open_kernel(); \
18380 pte_clear(&init_mm, (vaddr), (ptep)); \
18381+ pax_close_kernel(); \
18382 __flush_tlb_one((vaddr)); \
18383 } while (0)
18384
18385 #endif /* !__ASSEMBLY__ */
18386
18387+#define HAVE_ARCH_UNMAPPED_AREA
18388+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18389+
18390 /*
18391 * kern_addr_valid() is (1) for FLATMEM and (0) for
18392 * SPARSEMEM and DISCONTIGMEM
18393diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18394index ed5903b..c7fe163 100644
18395--- a/arch/x86/include/asm/pgtable_32_types.h
18396+++ b/arch/x86/include/asm/pgtable_32_types.h
18397@@ -8,7 +8,7 @@
18398 */
18399 #ifdef CONFIG_X86_PAE
18400 # include <asm/pgtable-3level_types.h>
18401-# define PMD_SIZE (1UL << PMD_SHIFT)
18402+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18403 # define PMD_MASK (~(PMD_SIZE - 1))
18404 #else
18405 # include <asm/pgtable-2level_types.h>
18406@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18407 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18408 #endif
18409
18410+#ifdef CONFIG_PAX_KERNEXEC
18411+#ifndef __ASSEMBLY__
18412+extern unsigned char MODULES_EXEC_VADDR[];
18413+extern unsigned char MODULES_EXEC_END[];
18414+#endif
18415+#include <asm/boot.h>
18416+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18417+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18418+#else
18419+#define ktla_ktva(addr) (addr)
18420+#define ktva_ktla(addr) (addr)
18421+#endif
18422+
18423 #define MODULES_VADDR VMALLOC_START
18424 #define MODULES_END VMALLOC_END
18425 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
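
The ktla_ktva()/ktva_ktla() macros introduced above translate kernel text addresses between the two mappings KERNEXEC maintains on i386, which sit LOAD_PHYSICAL_ADDR + PAGE_OFFSET apart; without KERNEXEC they collapse to the identity. A stand-alone illustration of the arithmetic, with assumed values for the two build-time constants:

#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL	/* assumed: 16 MiB default load address */
#define PAGE_OFFSET        0xc0000000UL	/* assumed: classic 3G/1G split */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long text = 0x00100000UL;	/* hypothetical text address */

	printf("ktva = %#lx\n", ktla_ktva(text));
	printf("round trip ok: %d\n", ktva_ktla(ktla_ktva(text)) == text);
	return 0;
}
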
18426diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18427index 5be9063..d62185b 100644
18428--- a/arch/x86/include/asm/pgtable_64.h
18429+++ b/arch/x86/include/asm/pgtable_64.h
18430@@ -16,10 +16,14 @@
18431
18432 extern pud_t level3_kernel_pgt[512];
18433 extern pud_t level3_ident_pgt[512];
18434+extern pud_t level3_vmalloc_start_pgt[512];
18435+extern pud_t level3_vmalloc_end_pgt[512];
18436+extern pud_t level3_vmemmap_pgt[512];
18437+extern pud_t level2_vmemmap_pgt[512];
18438 extern pmd_t level2_kernel_pgt[512];
18439 extern pmd_t level2_fixmap_pgt[512];
18440-extern pmd_t level2_ident_pgt[512];
18441-extern pgd_t init_level4_pgt[];
18442+extern pmd_t level2_ident_pgt[512*2];
18443+extern pgd_t init_level4_pgt[512];
18444
18445 #define swapper_pg_dir init_level4_pgt
18446
18447@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18448
18449 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18450 {
18451+ pax_open_kernel();
18452 *pmdp = pmd;
18453+ pax_close_kernel();
18454 }
18455
18456 static inline void native_pmd_clear(pmd_t *pmd)
18457@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18458
18459 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18460 {
18461+ pax_open_kernel();
18462 *pudp = pud;
18463+ pax_close_kernel();
18464 }
18465
18466 static inline void native_pud_clear(pud_t *pud)
18467@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
18468
18469 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18470 {
18471+ pax_open_kernel();
18472+ *pgdp = pgd;
18473+ pax_close_kernel();
18474+}
18475+
18476+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18477+{
18478 *pgdp = pgd;
18479 }
18480
18481diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18482index 7166e25..baaa6fe 100644
18483--- a/arch/x86/include/asm/pgtable_64_types.h
18484+++ b/arch/x86/include/asm/pgtable_64_types.h
18485@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t;
18486 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18487 #define MODULES_END _AC(0xffffffffff000000, UL)
18488 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18489+#define MODULES_EXEC_VADDR MODULES_VADDR
18490+#define MODULES_EXEC_END MODULES_END
18491 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18492 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18493
18494+#define ktla_ktva(addr) (addr)
18495+#define ktva_ktla(addr) (addr)
18496+
18497 #define EARLY_DYNAMIC_PAGE_TABLES 64
18498
18499 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18500diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18501index f216963..6bd7c21 100644
18502--- a/arch/x86/include/asm/pgtable_types.h
18503+++ b/arch/x86/include/asm/pgtable_types.h
18504@@ -111,8 +111,10 @@
18505
18506 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18507 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18508-#else
18509+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18510 #define _PAGE_NX (_AT(pteval_t, 0))
18511+#else
18512+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18513 #endif
18514
18515 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18516@@ -151,6 +153,9 @@
18517 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18518 _PAGE_ACCESSED)
18519
18520+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18521+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18522+
18523 #define __PAGE_KERNEL_EXEC \
18524 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18525 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18526@@ -161,7 +166,7 @@
18527 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
18528 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
18529 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
18530-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18531+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18532 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18533 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
18534 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18535@@ -218,7 +223,7 @@
18536 #ifdef CONFIG_X86_64
18537 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18538 #else
18539-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18540+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18541 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18542 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18543 #endif
18544@@ -257,7 +262,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18545 {
18546 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18547 }
18548+#endif
18549
18550+#if PAGETABLE_LEVELS == 3
18551+#include <asm-generic/pgtable-nopud.h>
18552+#endif
18553+
18554+#if PAGETABLE_LEVELS == 2
18555+#include <asm-generic/pgtable-nopmd.h>
18556+#endif
18557+
18558+#ifndef __ASSEMBLY__
18559 #if PAGETABLE_LEVELS > 3
18560 typedef struct { pudval_t pud; } pud_t;
18561
18562@@ -271,8 +286,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18563 return pud.pud;
18564 }
18565 #else
18566-#include <asm-generic/pgtable-nopud.h>
18567-
18568 static inline pudval_t native_pud_val(pud_t pud)
18569 {
18570 return native_pgd_val(pud.pgd);
18571@@ -292,8 +305,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18572 return pmd.pmd;
18573 }
18574 #else
18575-#include <asm-generic/pgtable-nopmd.h>
18576-
18577 static inline pmdval_t native_pmd_val(pmd_t pmd)
18578 {
18579 return native_pgd_val(pmd.pud.pgd);
18580@@ -333,7 +344,6 @@ typedef struct page *pgtable_t;
18581
18582 extern pteval_t __supported_pte_mask;
18583 extern void set_nx(void);
18584-extern int nx_enabled;
18585
18586 #define pgprot_writecombine pgprot_writecombine
18587 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18588diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18589index 7024c12..71c46b9 100644
18590--- a/arch/x86/include/asm/preempt.h
18591+++ b/arch/x86/include/asm/preempt.h
18592@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
18593 */
18594 static __always_inline bool __preempt_count_dec_and_test(void)
18595 {
18596- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18597+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18598 }
18599
18600 /*
18601diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18602index a4ea023..33aa874 100644
18603--- a/arch/x86/include/asm/processor.h
18604+++ b/arch/x86/include/asm/processor.h
18605@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
18606 /* Index into per_cpu list: */
18607 u16 cpu_index;
18608 u32 microcode;
18609-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
18610+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
18611
18612 #define X86_VENDOR_INTEL 0
18613 #define X86_VENDOR_CYRIX 1
18614@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18615 : "memory");
18616 }
18617
18618+/* invpcid (%rdx),%rax */
18619+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18620+
18621+#define INVPCID_SINGLE_ADDRESS 0UL
18622+#define INVPCID_SINGLE_CONTEXT 1UL
18623+#define INVPCID_ALL_GLOBAL 2UL
18624+#define INVPCID_ALL_MONGLOBAL 3UL
18625+
18626+#define PCID_KERNEL 0UL
18627+#define PCID_USER 1UL
18628+#define PCID_NOFLUSH (1UL << 63)
18629+
18630 static inline void load_cr3(pgd_t *pgdir)
18631 {
18632- write_cr3(__pa(pgdir));
18633+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18634 }
18635
18636 #ifdef CONFIG_X86_32
18637@@ -283,7 +295,7 @@ struct tss_struct {
18638
18639 } ____cacheline_aligned;
18640
18641-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18642+extern struct tss_struct init_tss[NR_CPUS];
18643
18644 /*
18645 * Save the original ist values for checking stack pointers during debugging
18646@@ -479,6 +491,7 @@ struct thread_struct {
18647 unsigned short ds;
18648 unsigned short fsindex;
18649 unsigned short gsindex;
18650+ unsigned short ss;
18651 #endif
18652 #ifdef CONFIG_X86_32
18653 unsigned long ip;
18654@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18655 extern unsigned long mmu_cr4_features;
18656 extern u32 *trampoline_cr4_features;
18657
18658-static inline void set_in_cr4(unsigned long mask)
18659-{
18660- unsigned long cr4;
18661-
18662- mmu_cr4_features |= mask;
18663- if (trampoline_cr4_features)
18664- *trampoline_cr4_features = mmu_cr4_features;
18665- cr4 = read_cr4();
18666- cr4 |= mask;
18667- write_cr4(cr4);
18668-}
18669-
18670-static inline void clear_in_cr4(unsigned long mask)
18671-{
18672- unsigned long cr4;
18673-
18674- mmu_cr4_features &= ~mask;
18675- if (trampoline_cr4_features)
18676- *trampoline_cr4_features = mmu_cr4_features;
18677- cr4 = read_cr4();
18678- cr4 &= ~mask;
18679- write_cr4(cr4);
18680-}
18681+extern void set_in_cr4(unsigned long mask);
18682+extern void clear_in_cr4(unsigned long mask);
18683
18684 typedef struct {
18685 unsigned long seg;
18686@@ -836,11 +828,18 @@ static inline void spin_lock_prefetch(const void *x)
18687 */
18688 #define TASK_SIZE PAGE_OFFSET
18689 #define TASK_SIZE_MAX TASK_SIZE
18690+
18691+#ifdef CONFIG_PAX_SEGMEXEC
18692+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18693+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18694+#else
18695 #define STACK_TOP TASK_SIZE
18696-#define STACK_TOP_MAX STACK_TOP
18697+#endif
18698+
18699+#define STACK_TOP_MAX TASK_SIZE
18700
18701 #define INIT_THREAD { \
18702- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18703+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18704 .vm86_info = NULL, \
18705 .sysenter_cs = __KERNEL_CS, \
18706 .io_bitmap_ptr = NULL, \
18707@@ -854,7 +853,7 @@ static inline void spin_lock_prefetch(const void *x)
18708 */
18709 #define INIT_TSS { \
18710 .x86_tss = { \
18711- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18712+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18713 .ss0 = __KERNEL_DS, \
18714 .ss1 = __KERNEL_CS, \
18715 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18716@@ -865,11 +864,7 @@ static inline void spin_lock_prefetch(const void *x)
18717 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18718
18719 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18720-#define KSTK_TOP(info) \
18721-({ \
18722- unsigned long *__ptr = (unsigned long *)(info); \
18723- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18724-})
18725+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18726
18727 /*
18728 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18729@@ -884,7 +879,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18730 #define task_pt_regs(task) \
18731 ({ \
18732 struct pt_regs *__regs__; \
18733- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18734+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18735 __regs__ - 1; \
18736 })
18737
18738@@ -894,13 +889,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18739 /*
18740 * User space process size. 47bits minus one guard page.
18741 */
18742-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18743+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18744
18745 /* This decides where the kernel will search for a free chunk of vm
18746 * space during mmap's.
18747 */
18748 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18749- 0xc0000000 : 0xFFFFe000)
18750+ 0xc0000000 : 0xFFFFf000)
18751
18752 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18753 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18754@@ -911,11 +906,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18755 #define STACK_TOP_MAX TASK_SIZE_MAX
18756
18757 #define INIT_THREAD { \
18758- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18759+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18760 }
18761
18762 #define INIT_TSS { \
18763- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18764+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18765 }
18766
18767 /*
18768@@ -943,6 +938,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18769 */
18770 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18771
18772+#ifdef CONFIG_PAX_SEGMEXEC
18773+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18774+#endif
18775+
18776 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18777
18778 /* Get/set a process' ability to use the timestamp counter instruction */
18779@@ -969,7 +968,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18780 return 0;
18781 }
18782
18783-extern unsigned long arch_align_stack(unsigned long sp);
18784+#define arch_align_stack(x) ((x) & ~0xfUL)
18785 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18786
18787 void default_idle(void);
18788@@ -979,6 +978,6 @@ bool xen_set_default_idle(void);
18789 #define xen_set_default_idle 0
18790 #endif
18791
18792-void stop_this_cpu(void *dummy);
18793+void stop_this_cpu(void *dummy) __noreturn;
18794 void df_debug(struct pt_regs *regs, long error_code);
18795 #endif /* _ASM_X86_PROCESSOR_H */
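
The PCID_* constants above follow the CR3 layout Intel defines when CR4.PCIDE is set: bits 11:0 of CR3 select the process-context ID, and bit 63 (PCID_NOFLUSH) suppresses the implicit TLB flush a CR3 load would otherwise perform, which is why load_cr3() now ORs PCID_KERNEL into the pgd's physical address. A small sketch of the composition — compose_cr3() is a hypothetical helper, not part of the patch, and the demo uses ULL for portability:

#include <stdint.h>

#define PCID_KERNEL  0ULL
#define PCID_USER    1ULL
#define PCID_NOFLUSH (1ULL << 63)

static inline uint64_t compose_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
{
	/* pgd_pa must be page-aligned; the PCID lives in the low 12 bits */
	return pgd_pa | (pcid & 0xfff) | (noflush ? PCID_NOFLUSH : 0);
}
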
18796diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18797index 6205f0c..688a3a9 100644
18798--- a/arch/x86/include/asm/ptrace.h
18799+++ b/arch/x86/include/asm/ptrace.h
18800@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18801 }
18802
18803 /*
18804- * user_mode_vm(regs) determines whether a register set came from user mode.
18805+ * user_mode(regs) determines whether a register set came from user mode.
18806 * This is true if V8086 mode was enabled OR if the register set was from
18807 * protected mode with RPL-3 CS value. This tricky test checks that with
18808 * one comparison. Many places in the kernel can bypass this full check
18809- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18810+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18811+ * be used.
18812 */
18813-static inline int user_mode(struct pt_regs *regs)
18814+static inline int user_mode_novm(struct pt_regs *regs)
18815 {
18816 #ifdef CONFIG_X86_32
18817 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18818 #else
18819- return !!(regs->cs & 3);
18820+ return !!(regs->cs & SEGMENT_RPL_MASK);
18821 #endif
18822 }
18823
18824-static inline int user_mode_vm(struct pt_regs *regs)
18825+static inline int user_mode(struct pt_regs *regs)
18826 {
18827 #ifdef CONFIG_X86_32
18828 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18829 USER_RPL;
18830 #else
18831- return user_mode(regs);
18832+ return user_mode_novm(regs);
18833 #endif
18834 }
18835
18836@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18837 #ifdef CONFIG_X86_64
18838 static inline bool user_64bit_mode(struct pt_regs *regs)
18839 {
18840+ unsigned long cs = regs->cs & 0xffff;
18841 #ifndef CONFIG_PARAVIRT
18842 /*
18843 * On non-paravirt systems, this is the only long mode CPL 3
18844 * selector. We do not allow long mode selectors in the LDT.
18845 */
18846- return regs->cs == __USER_CS;
18847+ return cs == __USER_CS;
18848 #else
18849 /* Headers are too twisted for this to go in paravirt.h. */
18850- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18851+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18852 #endif
18853 }
18854
18855@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18856 * Traps from the kernel do not save sp and ss.
18857 * Use the helper function to retrieve sp.
18858 */
18859- if (offset == offsetof(struct pt_regs, sp) &&
18860- regs->cs == __KERNEL_CS)
18861- return kernel_stack_pointer(regs);
18862+ if (offset == offsetof(struct pt_regs, sp)) {
18863+ unsigned long cs = regs->cs & 0xffff;
18864+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18865+ return kernel_stack_pointer(regs);
18866+ }
18867 #endif
18868 return *(unsigned long *)((unsigned long)regs + offset);
18869 }
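
The reworked user_mode() above keeps the original one-comparison trick: SEGMENT_RPL_MASK is 3, USER_RPL is 3, and X86_VM_MASK is the EFLAGS VM bit (bit 17), so ORing the RPL field with the VM bit lets a single >= 3 test catch both ring-3 code segments and V8086 mode. A runnable demonstration with those constants inlined:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      (1UL << 17)	/* EFLAGS.VM */

static int demo_user_mode(unsigned long cs, unsigned long flags)
{
	return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("kernel cs 0x10: %d\n", demo_user_mode(0x10, 0));		/* 0 */
	printf("user cs 0x73:   %d\n", demo_user_mode(0x73, 0));		/* 1 */
	printf("vm86, rpl 0 cs: %d\n", demo_user_mode(0x00, X86_VM_MASK));	/* 1 */
	return 0;
}
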
18870diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18871index 70f46f0..adfbdb4 100644
18872--- a/arch/x86/include/asm/qrwlock.h
18873+++ b/arch/x86/include/asm/qrwlock.h
18874@@ -7,8 +7,8 @@
18875 #define queue_write_unlock queue_write_unlock
18876 static inline void queue_write_unlock(struct qrwlock *lock)
18877 {
18878- barrier();
18879- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18880+ barrier();
18881+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18882 }
18883 #endif
18884
18885diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18886index 9c6b890..5305f53 100644
18887--- a/arch/x86/include/asm/realmode.h
18888+++ b/arch/x86/include/asm/realmode.h
18889@@ -22,16 +22,14 @@ struct real_mode_header {
18890 #endif
18891 /* APM/BIOS reboot */
18892 u32 machine_real_restart_asm;
18893-#ifdef CONFIG_X86_64
18894 u32 machine_real_restart_seg;
18895-#endif
18896 };
18897
18898 /* This must match data at trampoline_32/64.S */
18899 struct trampoline_header {
18900 #ifdef CONFIG_X86_32
18901 u32 start;
18902- u16 gdt_pad;
18903+ u16 boot_cs;
18904 u16 gdt_limit;
18905 u32 gdt_base;
18906 #else
18907diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18908index a82c4f1..ac45053 100644
18909--- a/arch/x86/include/asm/reboot.h
18910+++ b/arch/x86/include/asm/reboot.h
18911@@ -6,13 +6,13 @@
18912 struct pt_regs;
18913
18914 struct machine_ops {
18915- void (*restart)(char *cmd);
18916- void (*halt)(void);
18917- void (*power_off)(void);
18918+ void (* __noreturn restart)(char *cmd);
18919+ void (* __noreturn halt)(void);
18920+ void (* __noreturn power_off)(void);
18921 void (*shutdown)(void);
18922 void (*crash_shutdown)(struct pt_regs *);
18923- void (*emergency_restart)(void);
18924-};
18925+ void (* __noreturn emergency_restart)(void);
18926+} __no_const;
18927
18928 extern struct machine_ops machine_ops;
18929
18930diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18931index 8f7866a..e442f20 100644
18932--- a/arch/x86/include/asm/rmwcc.h
18933+++ b/arch/x86/include/asm/rmwcc.h
18934@@ -3,7 +3,34 @@
18935
18936 #ifdef CC_HAVE_ASM_GOTO
18937
18938-#define __GEN_RMWcc(fullop, var, cc, ...) \
18939+#ifdef CONFIG_PAX_REFCOUNT
18940+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18941+do { \
18942+ asm_volatile_goto (fullop \
18943+ ";jno 0f\n" \
18944+ fullantiop \
18945+ ";int $4\n0:\n" \
18946+ _ASM_EXTABLE(0b, 0b) \
18947+ ";j" cc " %l[cc_label]" \
18948+ : : "m" (var), ## __VA_ARGS__ \
18949+ : "memory" : cc_label); \
18950+ return 0; \
18951+cc_label: \
18952+ return 1; \
18953+} while (0)
18954+#else
18955+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18956+do { \
18957+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18958+ : : "m" (var), ## __VA_ARGS__ \
18959+ : "memory" : cc_label); \
18960+ return 0; \
18961+cc_label: \
18962+ return 1; \
18963+} while (0)
18964+#endif
18965+
18966+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18967 do { \
18968 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18969 : : "m" (var), ## __VA_ARGS__ \
18970@@ -13,15 +40,46 @@ cc_label: \
18971 return 1; \
18972 } while (0)
18973
18974-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18975- __GEN_RMWcc(op " " arg0, var, cc)
18976+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18977+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18978
18979-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18980- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18981+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18982+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18983+
18984+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18985+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18986+
18987+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18988+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18989
18990 #else /* !CC_HAVE_ASM_GOTO */
18991
18992-#define __GEN_RMWcc(fullop, var, cc, ...) \
18993+#ifdef CONFIG_PAX_REFCOUNT
18994+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18995+do { \
18996+ char c; \
18997+ asm volatile (fullop \
18998+ ";jno 0f\n" \
18999+ fullantiop \
19000+ ";int $4\n0:\n" \
19001+ _ASM_EXTABLE(0b, 0b) \
19002+ "; set" cc " %1" \
19003+ : "+m" (var), "=qm" (c) \
19004+ : __VA_ARGS__ : "memory"); \
19005+ return c != 0; \
19006+} while (0)
19007+#else
19008+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19009+do { \
19010+ char c; \
19011+ asm volatile (fullop "; set" cc " %1" \
19012+ : "+m" (var), "=qm" (c) \
19013+ : __VA_ARGS__ : "memory"); \
19014+ return c != 0; \
19015+} while (0)
19016+#endif
19017+
19018+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19019 do { \
19020 char c; \
19021 asm volatile (fullop "; set" cc " %1" \
19022@@ -30,11 +88,17 @@ do { \
19023 return c != 0; \
19024 } while (0)
19025
19026-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19027- __GEN_RMWcc(op " " arg0, var, cc)
19028+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19029+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19030+
19031+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19032+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19033+
19034+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19035+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
19036
19037-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19038- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
19039+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19040+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
19041
19042 #endif /* CC_HAVE_ASM_GOTO */
19043
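
The PAX_REFCOUNT variants of __GEN_RMWcc above all follow one pattern: perform the operation, take "jno" past the fixup if the signed result did not overflow, otherwise apply the anti-op (e.g. "incl" undoing "decl") and raise int $4, with the exception-table entry resuming right after the trap. A userspace analogue of that behaviour — abort() stands in for the #OF trap, and the sketch checks before mutating where the real code mutates first and undoes on overflow:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int dec_and_test_checked(int *counter)
{
	if (*counter == INT_MIN)	/* decl would set OF: refcount underflow */
		abort();		/* stands in for "int $4" */
	return --*counter == 0;	/* the cc test, "e" in __preempt_count_dec_and_test() */
}

int main(void)
{
	int c = 2;

	printf("%d\n", dec_and_test_checked(&c));	/* 0, c == 1 */
	printf("%d\n", dec_and_test_checked(&c));	/* 1, c == 0 */
	return 0;
}
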
19044diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
19045index cad82c9..2e5c5c1 100644
19046--- a/arch/x86/include/asm/rwsem.h
19047+++ b/arch/x86/include/asm/rwsem.h
19048@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
19049 {
19050 asm volatile("# beginning down_read\n\t"
19051 LOCK_PREFIX _ASM_INC "(%1)\n\t"
19052+
19053+#ifdef CONFIG_PAX_REFCOUNT
19054+ "jno 0f\n"
19055+ LOCK_PREFIX _ASM_DEC "(%1)\n"
19056+ "int $4\n0:\n"
19057+ _ASM_EXTABLE(0b, 0b)
19058+#endif
19059+
19060 /* adds 0x00000001 */
19061 " jns 1f\n"
19062 " call call_rwsem_down_read_failed\n"
19063@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
19064 "1:\n\t"
19065 " mov %1,%2\n\t"
19066 " add %3,%2\n\t"
19067+
19068+#ifdef CONFIG_PAX_REFCOUNT
19069+ "jno 0f\n"
19070+ "sub %3,%2\n"
19071+ "int $4\n0:\n"
19072+ _ASM_EXTABLE(0b, 0b)
19073+#endif
19074+
19075 " jle 2f\n\t"
19076 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19077 " jnz 1b\n\t"
19078@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19079 long tmp;
19080 asm volatile("# beginning down_write\n\t"
19081 LOCK_PREFIX " xadd %1,(%2)\n\t"
19082+
19083+#ifdef CONFIG_PAX_REFCOUNT
19084+ "jno 0f\n"
19085+ "mov %1,(%2)\n"
19086+ "int $4\n0:\n"
19087+ _ASM_EXTABLE(0b, 0b)
19088+#endif
19089+
19090 /* adds 0xffff0001, returns the old value */
19091 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19092 /* was the active mask 0 before? */
19093@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19094 long tmp;
19095 asm volatile("# beginning __up_read\n\t"
19096 LOCK_PREFIX " xadd %1,(%2)\n\t"
19097+
19098+#ifdef CONFIG_PAX_REFCOUNT
19099+ "jno 0f\n"
19100+ "mov %1,(%2)\n"
19101+ "int $4\n0:\n"
19102+ _ASM_EXTABLE(0b, 0b)
19103+#endif
19104+
19105 /* subtracts 1, returns the old value */
19106 " jns 1f\n\t"
19107 " call call_rwsem_wake\n" /* expects old value in %edx */
19108@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19109 long tmp;
19110 asm volatile("# beginning __up_write\n\t"
19111 LOCK_PREFIX " xadd %1,(%2)\n\t"
19112+
19113+#ifdef CONFIG_PAX_REFCOUNT
19114+ "jno 0f\n"
19115+ "mov %1,(%2)\n"
19116+ "int $4\n0:\n"
19117+ _ASM_EXTABLE(0b, 0b)
19118+#endif
19119+
19120 /* subtracts 0xffff0001, returns the old value */
19121 " jns 1f\n\t"
19122 " call call_rwsem_wake\n" /* expects old value in %edx */
19123@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19124 {
19125 asm volatile("# beginning __downgrade_write\n\t"
19126 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19127+
19128+#ifdef CONFIG_PAX_REFCOUNT
19129+ "jno 0f\n"
19130+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19131+ "int $4\n0:\n"
19132+ _ASM_EXTABLE(0b, 0b)
19133+#endif
19134+
19135 /*
19136 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19137 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19138@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19139 */
19140 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19141 {
19142- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19143+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19144+
19145+#ifdef CONFIG_PAX_REFCOUNT
19146+ "jno 0f\n"
19147+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19148+ "int $4\n0:\n"
19149+ _ASM_EXTABLE(0b, 0b)
19150+#endif
19151+
19152 : "+m" (sem->count)
19153 : "er" (delta));
19154 }
19155@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19156 */
19157 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19158 {
19159- return delta + xadd(&sem->count, delta);
19160+ return delta + xadd_check_overflow(&sem->count, delta);
19161 }
19162
19163 #endif /* __KERNEL__ */
19164diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19165index 6f1c3a8..7744f19 100644
19166--- a/arch/x86/include/asm/segment.h
19167+++ b/arch/x86/include/asm/segment.h
19168@@ -64,10 +64,15 @@
19169 * 26 - ESPFIX small SS
19170 * 27 - per-cpu [ offset to per-cpu data area ]
19171 * 28 - stack_canary-20 [ for stack protector ]
19172- * 29 - unused
19173- * 30 - unused
19174+ * 29 - PCI BIOS CS
19175+ * 30 - PCI BIOS DS
19176 * 31 - TSS for double fault handler
19177 */
19178+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19179+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19180+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19181+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19182+
19183 #define GDT_ENTRY_TLS_MIN 6
19184 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19185
19186@@ -79,6 +84,8 @@
19187
19188 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19189
19190+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19191+
19192 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19193
19194 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19195@@ -104,6 +111,12 @@
19196 #define __KERNEL_STACK_CANARY 0
19197 #endif
19198
19199+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19200+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19201+
19202+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19203+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19204+
19205 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19206
19207 /*
19208@@ -141,7 +154,7 @@
19209 */
19210
19211 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19212-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19213+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19214
19215
19216 #else
19217@@ -165,6 +178,8 @@
19218 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19219 #define __USER32_DS __USER_DS
19220
19221+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19222+
19223 #define GDT_ENTRY_TSS 8 /* needs two entries */
19224 #define GDT_ENTRY_LDT 10 /* needs two entries */
19225 #define GDT_ENTRY_TLS_MIN 12
19226@@ -173,6 +188,8 @@
19227 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19228 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19229
19230+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19231+
19232 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19233 #define FS_TLS 0
19234 #define GS_TLS 1
19235@@ -180,12 +197,14 @@
19236 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19237 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19238
19239-#define GDT_ENTRIES 16
19240+#define GDT_ENTRIES 17
19241
19242 #endif
19243
19244 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19245+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19246 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19247+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19248 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19249 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19250 #ifndef CONFIG_PARAVIRT
19251@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19252 {
19253 unsigned long __limit;
19254 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19255- return __limit + 1;
19256+ return __limit;
19257 }
19258
19259 #endif /* !__ASSEMBLY__ */
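
The get_limit() change above returns the raw segment limit that "lsll" reports (the highest valid byte offset) instead of limit + 1. One plausible motivation, since this file is not explicit about it: for a flat 4 GiB segment the limit is 0xffffffff, so adding 1 wraps a 32-bit result to 0. A two-line demonstration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned int limit = 0xffffffffu;	/* flat 4 GiB segment, as lsl reports it */

	printf("limit     = %#x\n", limit);
	printf("limit + 1 = %#x (wraps to 0 in 32 bits)\n", limit + 1);
	return 0;
}
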
19260diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19261index 8d3120f..352b440 100644
19262--- a/arch/x86/include/asm/smap.h
19263+++ b/arch/x86/include/asm/smap.h
19264@@ -25,11 +25,40 @@
19265
19266 #include <asm/alternative-asm.h>
19267
19268+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19269+#define ASM_PAX_OPEN_USERLAND \
19270+ 661: jmp 663f; \
19271+ .pushsection .altinstr_replacement, "a" ; \
19272+ 662: pushq %rax; nop; \
19273+ .popsection ; \
19274+ .pushsection .altinstructions, "a" ; \
19275+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19276+ .popsection ; \
19277+ call __pax_open_userland; \
19278+ popq %rax; \
19279+ 663:
19280+
19281+#define ASM_PAX_CLOSE_USERLAND \
19282+ 661: jmp 663f; \
19283+ .pushsection .altinstr_replacement, "a" ; \
19284+ 662: pushq %rax; nop; \
19285+ .popsection; \
19286+ .pushsection .altinstructions, "a" ; \
19287+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19288+ .popsection; \
19289+ call __pax_close_userland; \
19290+ popq %rax; \
19291+ 663:
19292+#else
19293+#define ASM_PAX_OPEN_USERLAND
19294+#define ASM_PAX_CLOSE_USERLAND
19295+#endif
19296+
19297 #ifdef CONFIG_X86_SMAP
19298
19299 #define ASM_CLAC \
19300 661: ASM_NOP3 ; \
19301- .pushsection .altinstr_replacement, "ax" ; \
19302+ .pushsection .altinstr_replacement, "a" ; \
19303 662: __ASM_CLAC ; \
19304 .popsection ; \
19305 .pushsection .altinstructions, "a" ; \
19306@@ -38,7 +67,7 @@
19307
19308 #define ASM_STAC \
19309 661: ASM_NOP3 ; \
19310- .pushsection .altinstr_replacement, "ax" ; \
19311+ .pushsection .altinstr_replacement, "a" ; \
19312 662: __ASM_STAC ; \
19313 .popsection ; \
19314 .pushsection .altinstructions, "a" ; \
19315@@ -56,6 +85,37 @@
19316
19317 #include <asm/alternative.h>
19318
19319+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19320+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19321+
19322+extern void __pax_open_userland(void);
19323+static __always_inline unsigned long pax_open_userland(void)
19324+{
19325+
19326+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19327+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19328+ :
19329+ : [open] "i" (__pax_open_userland)
19330+ : "memory", "rax");
19331+#endif
19332+
19333+ return 0;
19334+}
19335+
19336+extern void __pax_close_userland(void);
19337+static __always_inline unsigned long pax_close_userland(void)
19338+{
19339+
19340+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19341+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19342+ :
19343+ : [close] "i" (__pax_close_userland)
19344+ : "memory", "rax");
19345+#endif
19346+
19347+ return 0;
19348+}
19349+
19350 #ifdef CONFIG_X86_SMAP
19351
19352 static __always_inline void clac(void)
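
pax_open_userland()/pax_close_userland() above rely on ALTERNATIVE() so that, on CPUs without the PaX-defined X86_FEATURE_STRONGUDEREF bit, the call site stays a 5-byte NOP and costs nothing. A portable analogue of that boot-time patching is selecting a function pointer once during init — a sketch, with every name here hypothetical:

#include <stdio.h>

static void open_userland_nop(void) { /* feature absent: patched-out NOP */ }
static void open_userland_real(void) { puts("switch to userland PCID/segment"); }

static void (*open_userland)(void) = open_userland_nop;

static void init_alternatives(int cpu_has_stronguderef)
{
	if (cpu_has_stronguderef)
		open_userland = open_userland_real;	/* "patch in" the call */
}

int main(void)
{
	init_alternatives(1);	/* assume the feature is present */
	open_userland();
	return 0;
}
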
19353diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19354index 8cd27e0..7f05ec8 100644
19355--- a/arch/x86/include/asm/smp.h
19356+++ b/arch/x86/include/asm/smp.h
19357@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19358 /* cpus sharing the last level cache: */
19359 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19360 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19361-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19362+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19363
19364 static inline struct cpumask *cpu_sibling_mask(int cpu)
19365 {
19366@@ -78,7 +78,7 @@ struct smp_ops {
19367
19368 void (*send_call_func_ipi)(const struct cpumask *mask);
19369 void (*send_call_func_single_ipi)(int cpu);
19370-};
19371+} __no_const;
19372
19373 /* Globals due to paravirt */
19374 extern void set_cpu_sibling_map(int cpu);
19375@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19376 extern int safe_smp_processor_id(void);
19377
19378 #elif defined(CONFIG_X86_64_SMP)
19379-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19380-
19381-#define stack_smp_processor_id() \
19382-({ \
19383- struct thread_info *ti; \
19384- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19385- ti->cpu; \
19386-})
19387+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19388+#define stack_smp_processor_id() raw_smp_processor_id()
19389 #define safe_smp_processor_id() smp_processor_id()
19390
19391 #endif
19392diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19393index 54f1c80..39362a5 100644
19394--- a/arch/x86/include/asm/spinlock.h
19395+++ b/arch/x86/include/asm/spinlock.h
19396@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19397 static inline void arch_read_lock(arch_rwlock_t *rw)
19398 {
19399 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19400+
19401+#ifdef CONFIG_PAX_REFCOUNT
19402+ "jno 0f\n"
19403+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19404+ "int $4\n0:\n"
19405+ _ASM_EXTABLE(0b, 0b)
19406+#endif
19407+
19408 "jns 1f\n"
19409 "call __read_lock_failed\n\t"
19410 "1:\n"
19411@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19412 static inline void arch_write_lock(arch_rwlock_t *rw)
19413 {
19414 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19415+
19416+#ifdef CONFIG_PAX_REFCOUNT
19417+ "jno 0f\n"
19418+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19419+ "int $4\n0:\n"
19420+ _ASM_EXTABLE(0b, 0b)
19421+#endif
19422+
19423 "jz 1f\n"
19424 "call __write_lock_failed\n\t"
19425 "1:\n"
19426@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19427
19428 static inline void arch_read_unlock(arch_rwlock_t *rw)
19429 {
19430- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19431+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19432+
19433+#ifdef CONFIG_PAX_REFCOUNT
19434+ "jno 0f\n"
19435+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19436+ "int $4\n0:\n"
19437+ _ASM_EXTABLE(0b, 0b)
19438+#endif
19439+
19440 :"+m" (rw->lock) : : "memory");
19441 }
19442
19443 static inline void arch_write_unlock(arch_rwlock_t *rw)
19444 {
19445- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
19446+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
19447+
19448+#ifdef CONFIG_PAX_REFCOUNT
19449+ "jno 0f\n"
19450+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
19451+ "int $4\n0:\n"
19452+ _ASM_EXTABLE(0b, 0b)
19453+#endif
19454+
19455 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
19456 }
19457 #else
19458diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19459index 6a99859..03cb807 100644
19460--- a/arch/x86/include/asm/stackprotector.h
19461+++ b/arch/x86/include/asm/stackprotector.h
19462@@ -47,7 +47,7 @@
19463 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19464 */
19465 #define GDT_STACK_CANARY_INIT \
19466- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19467+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19468
19469 /*
19470 * Initialize the stackprotector canary value.
19471@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19472
19473 static inline void load_stack_canary_segment(void)
19474 {
19475-#ifdef CONFIG_X86_32
19476+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19477 asm volatile ("mov %0, %%gs" : : "r" (0));
19478 #endif
19479 }
19480diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19481index 70bbe39..4ae2bd4 100644
19482--- a/arch/x86/include/asm/stacktrace.h
19483+++ b/arch/x86/include/asm/stacktrace.h
19484@@ -11,28 +11,20 @@
19485
19486 extern int kstack_depth_to_print;
19487
19488-struct thread_info;
19489+struct task_struct;
19490 struct stacktrace_ops;
19491
19492-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19493- unsigned long *stack,
19494- unsigned long bp,
19495- const struct stacktrace_ops *ops,
19496- void *data,
19497- unsigned long *end,
19498- int *graph);
19499+typedef unsigned long walk_stack_t(struct task_struct *task,
19500+ void *stack_start,
19501+ unsigned long *stack,
19502+ unsigned long bp,
19503+ const struct stacktrace_ops *ops,
19504+ void *data,
19505+ unsigned long *end,
19506+ int *graph);
19507
19508-extern unsigned long
19509-print_context_stack(struct thread_info *tinfo,
19510- unsigned long *stack, unsigned long bp,
19511- const struct stacktrace_ops *ops, void *data,
19512- unsigned long *end, int *graph);
19513-
19514-extern unsigned long
19515-print_context_stack_bp(struct thread_info *tinfo,
19516- unsigned long *stack, unsigned long bp,
19517- const struct stacktrace_ops *ops, void *data,
19518- unsigned long *end, int *graph);
19519+extern walk_stack_t print_context_stack;
19520+extern walk_stack_t print_context_stack_bp;
19521
19522 /* Generic stack tracer with callbacks */
19523
19524@@ -40,7 +32,7 @@ struct stacktrace_ops {
19525 void (*address)(void *data, unsigned long address, int reliable);
19526 /* On negative return stop dumping */
19527 int (*stack)(void *data, char *name);
19528- walk_stack_t walk_stack;
19529+ walk_stack_t *walk_stack;
19530 };
19531
19532 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
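
The stacktrace.h hunk above turns walk_stack_t from a pointer-to-function typedef into a plain function typedef. That lets "extern walk_stack_t print_context_stack;" declare the function itself (so there is no writable pointer to overwrite), while struct stacktrace_ops spells the pointer out as "walk_stack_t *walk_stack". A minimal demo of the two declaration forms:

#include <stdio.h>

typedef int walk_fn(int depth);	/* function type, not a pointer type */

extern walk_fn walker;		/* declares a function named walker */

int walker(int depth) { return depth + 1; }

struct ops {
	walk_fn *walk;		/* the pointer is now explicit */
};

int main(void)
{
	struct ops o = { .walk = walker };

	printf("%d\n", o.walk(1));	/* prints 2 */
	return 0;
}
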
19533diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19534index d7f3b3b..3cc39f1 100644
19535--- a/arch/x86/include/asm/switch_to.h
19536+++ b/arch/x86/include/asm/switch_to.h
19537@@ -108,7 +108,7 @@ do { \
19538 "call __switch_to\n\t" \
19539 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19540 __switch_canary \
19541- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19542+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19543 "movq %%rax,%%rdi\n\t" \
19544 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19545 "jnz ret_from_fork\n\t" \
19546@@ -119,7 +119,7 @@ do { \
19547 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19548 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19549 [_tif_fork] "i" (_TIF_FORK), \
19550- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19551+ [thread_info] "m" (current_tinfo), \
19552 [current_task] "m" (current_task) \
19553 __switch_canary_iparam \
19554 : "memory", "cc" __EXTRA_CLOBBER)
19555diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19556index 8540538..4b0b5e9 100644
19557--- a/arch/x86/include/asm/thread_info.h
19558+++ b/arch/x86/include/asm/thread_info.h
19559@@ -24,7 +24,6 @@ struct exec_domain;
19560 #include <linux/atomic.h>
19561
19562 struct thread_info {
19563- struct task_struct *task; /* main task structure */
19564 struct exec_domain *exec_domain; /* execution domain */
19565 __u32 flags; /* low level flags */
19566 __u32 status; /* thread synchronous flags */
19567@@ -33,13 +32,13 @@ struct thread_info {
19568 mm_segment_t addr_limit;
19569 struct restart_block restart_block;
19570 void __user *sysenter_return;
19571+ unsigned long lowest_stack;
19572 unsigned int sig_on_uaccess_error:1;
19573 unsigned int uaccess_err:1; /* uaccess failed */
19574 };
19575
19576-#define INIT_THREAD_INFO(tsk) \
19577+#define INIT_THREAD_INFO \
19578 { \
19579- .task = &tsk, \
19580 .exec_domain = &default_exec_domain, \
19581 .flags = 0, \
19582 .cpu = 0, \
19583@@ -50,7 +49,7 @@ struct thread_info {
19584 }, \
19585 }
19586
19587-#define init_thread_info (init_thread_union.thread_info)
19588+#define init_thread_info (init_thread_union.stack)
19589 #define init_stack (init_thread_union.stack)
19590
19591 #else /* !__ASSEMBLY__ */
19592@@ -91,6 +90,7 @@ struct thread_info {
19593 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19594 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19595 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19596+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19597
19598 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19599 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19600@@ -115,17 +115,18 @@ struct thread_info {
19601 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19602 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19603 #define _TIF_X32 (1 << TIF_X32)
19604+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19605
19606 /* work to do in syscall_trace_enter() */
19607 #define _TIF_WORK_SYSCALL_ENTRY \
19608 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19609 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19610- _TIF_NOHZ)
19611+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19612
19613 /* work to do in syscall_trace_leave() */
19614 #define _TIF_WORK_SYSCALL_EXIT \
19615 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19616- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19617+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19618
19619 /* work to do on interrupt/exception return */
19620 #define _TIF_WORK_MASK \
19621@@ -136,7 +137,7 @@ struct thread_info {
19622 /* work to do on any return to user space */
19623 #define _TIF_ALLWORK_MASK \
19624 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19625- _TIF_NOHZ)
19626+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19627
19628 /* Only used for 64 bit */
19629 #define _TIF_DO_NOTIFY_MASK \
19630@@ -151,7 +152,6 @@ struct thread_info {
19631 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19632
19633 #define STACK_WARN (THREAD_SIZE/8)
19634-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19635
19636 /*
19637 * macros/functions for gaining access to the thread information structure
19638@@ -162,26 +162,18 @@ struct thread_info {
19639
19640 DECLARE_PER_CPU(unsigned long, kernel_stack);
19641
19642+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19643+
19644 static inline struct thread_info *current_thread_info(void)
19645 {
19646- struct thread_info *ti;
19647- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19648- KERNEL_STACK_OFFSET - THREAD_SIZE);
19649- return ti;
19650+ return this_cpu_read_stable(current_tinfo);
19651 }
19652
19653 #else /* !__ASSEMBLY__ */
19654
19655 /* how to get the thread information struct from ASM */
19656 #define GET_THREAD_INFO(reg) \
19657- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19658- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19659-
19660-/*
19661- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19662- * a certain register (to be used in assembler memory operands).
19663- */
19664-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19665+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19666
19667 #endif
19668
19669@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19670 extern void arch_task_cache_init(void);
19671 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19672 extern void arch_release_task_struct(struct task_struct *tsk);
19673+
19674+#define __HAVE_THREAD_FUNCTIONS
19675+#define task_thread_info(task) (&(task)->tinfo)
19676+#define task_stack_page(task) ((task)->stack)
19677+#define setup_thread_stack(p, org) do {} while (0)
19678+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19679+
19680 #endif
19681 #endif /* _ASM_X86_THREAD_INFO_H */
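
current_thread_info() above stops deriving thread_info from the stack and reads a per-cpu pointer (current_tinfo) instead, which is what allows dropping the task back-pointer and redefining task_thread_info()/end_of_stack(). For contrast, the classic stack-derived scheme (the same masking idea behind the stack_smp_processor_id() removed from smp.h earlier) looked roughly like this, with the THREAD_SIZE value assumed for the demo:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE (16ULL * 1024)	/* assumed: 16 KiB x86-64 stacks */

static uint64_t stack_derived_thread_info(uint64_t sp)
{
	return sp & ~(THREAD_SIZE - 1);	/* mask down to the stack base */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       stack_derived_thread_info(0xffff88000123f5c8ULL));
	return 0;
}
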
19682diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19683index 04905bf..49203ca 100644
19684--- a/arch/x86/include/asm/tlbflush.h
19685+++ b/arch/x86/include/asm/tlbflush.h
19686@@ -17,18 +17,44 @@
19687
19688 static inline void __native_flush_tlb(void)
19689 {
19690+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19691+ u64 descriptor[2];
19692+
19693+ descriptor[0] = PCID_KERNEL;
19694+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
19695+ return;
19696+ }
19697+
19698+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19699+ if (static_cpu_has(X86_FEATURE_PCID)) {
19700+ unsigned int cpu = raw_get_cpu();
19701+
19702+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19703+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19704+ raw_put_cpu_no_resched();
19705+ return;
19706+ }
19707+#endif
19708+
19709 native_write_cr3(native_read_cr3());
19710 }
19711
19712 static inline void __native_flush_tlb_global_irq_disabled(void)
19713 {
19714- unsigned long cr4;
19715+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19716+ u64 descriptor[2];
19717
19718- cr4 = native_read_cr4();
19719- /* clear PGE */
19720- native_write_cr4(cr4 & ~X86_CR4_PGE);
19721- /* write old PGE again and flush TLBs */
19722- native_write_cr4(cr4);
19723+ descriptor[0] = PCID_KERNEL;
19724+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19725+ } else {
19726+ unsigned long cr4;
19727+
19728+ cr4 = native_read_cr4();
19729+ /* clear PGE */
19730+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19731+ /* write old PGE again and flush TLBs */
19732+ native_write_cr4(cr4);
19733+ }
19734 }
19735
19736 static inline void __native_flush_tlb_global(void)
19737@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19738
19739 static inline void __native_flush_tlb_single(unsigned long addr)
19740 {
19741+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19742+ u64 descriptor[2];
19743+
19744+ descriptor[0] = PCID_KERNEL;
19745+ descriptor[1] = addr;
19746+
19747+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19748+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19749+ if (addr < TASK_SIZE_MAX)
19750+ descriptor[1] += pax_user_shadow_base;
19751+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19752+ }
19753+
19754+ descriptor[0] = PCID_USER;
19755+ descriptor[1] = addr;
19756+#endif
19757+
19758+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19759+ return;
19760+ }
19761+
19762+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19763+ if (static_cpu_has(X86_FEATURE_PCID)) {
19764+ unsigned int cpu = raw_get_cpu();
19765+
19766+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19767+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19768+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19769+ raw_put_cpu_no_resched();
19770+
19771+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19772+ addr += pax_user_shadow_base;
19773+ }
19774+#endif
19775+
19776 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19777 }
19778
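
The INVPCID-based flushes above pass the instruction a 16-byte in-memory descriptor — qword 0 holds the PCID in its low 12 bits, qword 1 the linear address (used only by type 0) — with the invalidation type in a register: 0 = one address, 1 = one PCID, 2 = everything including globals, 3 = everything non-global (the patch's INVPCID_ALL_MONGLOBAL). A kernel-context sketch of the same encoding; INVPCID is privileged and x86-64-only, so this compiles but cannot run from userspace:

#include <stdint.h>

struct invpcid_desc {
	uint64_t pcid;	/* only bits 11:0 are consumed */
	uint64_t addr;	/* linear address, type 0 only */
};

static inline void invpcid(uint64_t type, uint64_t pcid, uint64_t addr)
{
	struct invpcid_desc desc = { .pcid = pcid, .addr = addr };

	/* same byte encoding the patch emits via __ASM_INVPCID:
	 * invpcid (%rdx),%rax */
	asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"
		     : : "d"(&desc), "a"(type) : "memory");
}
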
19779diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19780index 0d592e0..526f797 100644
19781--- a/arch/x86/include/asm/uaccess.h
19782+++ b/arch/x86/include/asm/uaccess.h
19783@@ -7,6 +7,7 @@
19784 #include <linux/compiler.h>
19785 #include <linux/thread_info.h>
19786 #include <linux/string.h>
19787+#include <linux/spinlock.h>
19788 #include <asm/asm.h>
19789 #include <asm/page.h>
19790 #include <asm/smap.h>
19791@@ -29,7 +30,12 @@
19792
19793 #define get_ds() (KERNEL_DS)
19794 #define get_fs() (current_thread_info()->addr_limit)
19795+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19796+void __set_fs(mm_segment_t x);
19797+void set_fs(mm_segment_t x);
19798+#else
19799 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19800+#endif
19801
19802 #define segment_eq(a, b) ((a).seg == (b).seg)
19803
19804@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19805 * checks that the pointer is in the user space range - after calling
19806 * this function, memory access functions may still return -EFAULT.
19807 */
19808-#define access_ok(type, addr, size) \
19809- likely(!__range_not_ok(addr, size, user_addr_max()))
19810+extern int _cond_resched(void);
19811+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19812+#define access_ok(type, addr, size) \
19813+({ \
19814+ unsigned long __size = size; \
19815+ unsigned long __addr = (unsigned long)addr; \
19816+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19817+ if (__ret_ao && __size) { \
19818+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19819+ unsigned long __end_ao = __addr + __size - 1; \
19820+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19821+ while (__addr_ao <= __end_ao) { \
19822+ char __c_ao; \
19823+ __addr_ao += PAGE_SIZE; \
19824+ if (__size > PAGE_SIZE) \
19825+ _cond_resched(); \
19826+ if (__get_user(__c_ao, (char __user *)__addr)) \
19827+ break; \
19828+ if (type != VERIFY_WRITE) { \
19829+ __addr = __addr_ao; \
19830+ continue; \
19831+ } \
19832+ if (__put_user(__c_ao, (char __user *)__addr)) \
19833+ break; \
19834+ __addr = __addr_ao; \
19835+ } \
19836+ } \
19837+ } \
19838+ __ret_ao; \
19839+})
19840
19841 /*
19842 * The exception table consists of pairs of addresses relative to the
19843@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19844 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19845 __chk_user_ptr(ptr); \
19846 might_fault(); \
19847+ pax_open_userland(); \
19848 asm volatile("call __get_user_%P3" \
19849 : "=a" (__ret_gu), "=r" (__val_gu) \
19850 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19851 (x) = (__typeof__(*(ptr))) __val_gu; \
19852+ pax_close_userland(); \
19853 __ret_gu; \
19854 })
19855
19856@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19857 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19858 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19859
19860-
19861+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19862+#define __copyuser_seg "gs;"
19863+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19864+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19865+#else
19866+#define __copyuser_seg
19867+#define __COPYUSER_SET_ES
19868+#define __COPYUSER_RESTORE_ES
19869+#endif
19870
19871 #ifdef CONFIG_X86_32
19872 #define __put_user_asm_u64(x, addr, err, errret) \
19873 asm volatile(ASM_STAC "\n" \
19874- "1: movl %%eax,0(%2)\n" \
19875- "2: movl %%edx,4(%2)\n" \
19876+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19877+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19878 "3: " ASM_CLAC "\n" \
19879 ".section .fixup,\"ax\"\n" \
19880 "4: movl %3,%0\n" \
19881@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19882
19883 #define __put_user_asm_ex_u64(x, addr) \
19884 asm volatile(ASM_STAC "\n" \
19885- "1: movl %%eax,0(%1)\n" \
19886- "2: movl %%edx,4(%1)\n" \
19887+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19888+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19889 "3: " ASM_CLAC "\n" \
19890 _ASM_EXTABLE_EX(1b, 2b) \
19891 _ASM_EXTABLE_EX(2b, 3b) \
19892@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19893 __typeof__(*(ptr)) __pu_val; \
19894 __chk_user_ptr(ptr); \
19895 might_fault(); \
19896- __pu_val = x; \
19897+ __pu_val = (x); \
19898+ pax_open_userland(); \
19899 switch (sizeof(*(ptr))) { \
19900 case 1: \
19901 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19902@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19903 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19904 break; \
19905 } \
19906+ pax_close_userland(); \
19907 __ret_pu; \
19908 })
19909
19910@@ -355,8 +401,10 @@ do { \
19911 } while (0)
19912
19913 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19914+do { \
19915+ pax_open_userland(); \
19916 asm volatile(ASM_STAC "\n" \
19917- "1: mov"itype" %2,%"rtype"1\n" \
19918+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19919 "2: " ASM_CLAC "\n" \
19920 ".section .fixup,\"ax\"\n" \
19921 "3: mov %3,%0\n" \
19922@@ -364,8 +412,10 @@ do { \
19923 " jmp 2b\n" \
19924 ".previous\n" \
19925 _ASM_EXTABLE(1b, 3b) \
19926- : "=r" (err), ltype(x) \
19927- : "m" (__m(addr)), "i" (errret), "0" (err))
19928+ : "=r" (err), ltype (x) \
19929+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19930+ pax_close_userland(); \
19931+} while (0)
19932
19933 #define __get_user_size_ex(x, ptr, size) \
19934 do { \
19935@@ -389,7 +439,7 @@ do { \
19936 } while (0)
19937
19938 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19939- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19940+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19941 "2:\n" \
19942 _ASM_EXTABLE_EX(1b, 2b) \
19943 : ltype(x) : "m" (__m(addr)))
19944@@ -406,13 +456,24 @@ do { \
19945 int __gu_err; \
19946 unsigned long __gu_val; \
19947 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19948- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19949+ (x) = (__typeof__(*(ptr)))__gu_val; \
19950 __gu_err; \
19951 })
19952
19953 /* FIXME: this hack is definitely wrong -AK */
19954 struct __large_struct { unsigned long buf[100]; };
19955-#define __m(x) (*(struct __large_struct __user *)(x))
19956+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19957+#define ____m(x) \
19958+({ \
19959+ unsigned long ____x = (unsigned long)(x); \
19960+ if (____x < pax_user_shadow_base) \
19961+ ____x += pax_user_shadow_base; \
19962+ (typeof(x))____x; \
19963+})
19964+#else
19965+#define ____m(x) (x)
19966+#endif
19967+#define __m(x) (*(struct __large_struct __user *)____m(x))
19968
19969 /*
19970 * Tell gcc we read from memory instead of writing: this is because
19971@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
19972 * aliasing issues.
19973 */
19974 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19975+do { \
19976+ pax_open_userland(); \
19977 asm volatile(ASM_STAC "\n" \
19978- "1: mov"itype" %"rtype"1,%2\n" \
19979+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19980 "2: " ASM_CLAC "\n" \
19981 ".section .fixup,\"ax\"\n" \
19982 "3: mov %3,%0\n" \
19983@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
19984 ".previous\n" \
19985 _ASM_EXTABLE(1b, 3b) \
19986 : "=r"(err) \
19987- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19988+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19989+ pax_close_userland(); \
19990+} while (0)
19991
19992 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19993- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19994+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19995 "2:\n" \
19996 _ASM_EXTABLE_EX(1b, 2b) \
19997 : : ltype(x), "m" (__m(addr)))
19998@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
19999 */
20000 #define uaccess_try do { \
20001 current_thread_info()->uaccess_err = 0; \
20002+ pax_open_userland(); \
20003 stac(); \
20004 barrier();
20005
20006 #define uaccess_catch(err) \
20007 clac(); \
20008+ pax_close_userland(); \
20009 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
20010 } while (0)
20011
20012@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
20013 * On error, the variable @x is set to zero.
20014 */
20015
20016+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20017+#define __get_user(x, ptr) get_user((x), (ptr))
20018+#else
20019 #define __get_user(x, ptr) \
20020 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
20021+#endif
20022
20023 /**
20024 * __put_user: - Write a simple value into user space, with less checking.
20025@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
20026 * Returns zero on success, or -EFAULT on error.
20027 */
20028
20029+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20030+#define __put_user(x, ptr) put_user((x), (ptr))
20031+#else
20032 #define __put_user(x, ptr) \
20033 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
20034+#endif
20035
20036 #define __get_user_unaligned __get_user
20037 #define __put_user_unaligned __put_user
20038@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
20039 #define get_user_ex(x, ptr) do { \
20040 unsigned long __gue_val; \
20041 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
20042- (x) = (__force __typeof__(*(ptr)))__gue_val; \
20043+ (x) = (__typeof__(*(ptr)))__gue_val; \
20044 } while (0)
20045
20046 #define put_user_try uaccess_try
20047@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
20048 __typeof__(ptr) __uval = (uval); \
20049 __typeof__(*(ptr)) __old = (old); \
20050 __typeof__(*(ptr)) __new = (new); \
20051+ pax_open_userland(); \
20052 switch (size) { \
20053 case 1: \
20054 { \
20055 asm volatile("\t" ASM_STAC "\n" \
20056- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
20057+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
20058 "2:\t" ASM_CLAC "\n" \
20059 "\t.section .fixup, \"ax\"\n" \
20060 "3:\tmov %3, %0\n" \
20061 "\tjmp 2b\n" \
20062 "\t.previous\n" \
20063 _ASM_EXTABLE(1b, 3b) \
20064- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20065+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20066 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20067 : "memory" \
20068 ); \
20069@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20070 case 2: \
20071 { \
20072 asm volatile("\t" ASM_STAC "\n" \
20073- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20074+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20075 "2:\t" ASM_CLAC "\n" \
20076 "\t.section .fixup, \"ax\"\n" \
20077 "3:\tmov %3, %0\n" \
20078 "\tjmp 2b\n" \
20079 "\t.previous\n" \
20080 _ASM_EXTABLE(1b, 3b) \
20081- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20082+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20083 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20084 : "memory" \
20085 ); \
20086@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20087 case 4: \
20088 { \
20089 asm volatile("\t" ASM_STAC "\n" \
20090- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20091+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20092 "2:\t" ASM_CLAC "\n" \
20093 "\t.section .fixup, \"ax\"\n" \
20094 "3:\tmov %3, %0\n" \
20095 "\tjmp 2b\n" \
20096 "\t.previous\n" \
20097 _ASM_EXTABLE(1b, 3b) \
20098- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20099+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20100 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20101 : "memory" \
20102 ); \
20103@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20104 __cmpxchg_wrong_size(); \
20105 \
20106 asm volatile("\t" ASM_STAC "\n" \
20107- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20108+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20109 "2:\t" ASM_CLAC "\n" \
20110 "\t.section .fixup, \"ax\"\n" \
20111 "3:\tmov %3, %0\n" \
20112 "\tjmp 2b\n" \
20113 "\t.previous\n" \
20114 _ASM_EXTABLE(1b, 3b) \
20115- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20116+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20117 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20118 : "memory" \
20119 ); \
20120@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20121 default: \
20122 __cmpxchg_wrong_size(); \
20123 } \
20124+ pax_close_userland(); \
20125 *__uval = __old; \
20126 __ret; \
20127 })
20128@@ -636,17 +713,6 @@ extern struct movsl_mask {
20129
20130 #define ARCH_HAS_NOCACHE_UACCESS 1
20131
20132-#ifdef CONFIG_X86_32
20133-# include <asm/uaccess_32.h>
20134-#else
20135-# include <asm/uaccess_64.h>
20136-#endif
20137-
20138-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20139- unsigned n);
20140-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20141- unsigned n);
20142-
20143 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20144 # define copy_user_diag __compiletime_error
20145 #else
20146@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20147 extern void copy_user_diag("copy_from_user() buffer size is too small")
20148 copy_from_user_overflow(void);
20149 extern void copy_user_diag("copy_to_user() buffer size is too small")
20150-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20151+copy_to_user_overflow(void);
20152
20153 #undef copy_user_diag
20154
20155@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20156
20157 extern void
20158 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20159-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20160+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20161 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20162
20163 #else
20164@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20165
20166 #endif
20167
20168+#ifdef CONFIG_X86_32
20169+# include <asm/uaccess_32.h>
20170+#else
20171+# include <asm/uaccess_64.h>
20172+#endif
20173+
20174 static inline unsigned long __must_check
20175 copy_from_user(void *to, const void __user *from, unsigned long n)
20176 {
20177- int sz = __compiletime_object_size(to);
20178+ size_t sz = __compiletime_object_size(to);
20179
20180 might_fault();
20181
20182@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20183 * case, and do only runtime checking for non-constant sizes.
20184 */
20185
20186- if (likely(sz < 0 || sz >= n))
20187- n = _copy_from_user(to, from, n);
20188- else if(__builtin_constant_p(n))
20189- copy_from_user_overflow();
20190- else
20191- __copy_from_user_overflow(sz, n);
20192+ if (likely(sz != (size_t)-1 && sz < n)) {
20193+ if(__builtin_constant_p(n))
20194+ copy_from_user_overflow();
20195+ else
20196+ __copy_from_user_overflow(sz, n);
20197+ } else if (access_ok(VERIFY_READ, from, n))
20198+ n = __copy_from_user(to, from, n);
20199+ else if ((long)n > 0)
20200+ memset(to, 0, n);
20201
20202 return n;
20203 }
20204@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20205 static inline unsigned long __must_check
20206 copy_to_user(void __user *to, const void *from, unsigned long n)
20207 {
20208- int sz = __compiletime_object_size(from);
20209+ size_t sz = __compiletime_object_size(from);
20210
20211 might_fault();
20212
20213 /* See the comment in copy_from_user() above. */
20214- if (likely(sz < 0 || sz >= n))
20215- n = _copy_to_user(to, from, n);
20216- else if(__builtin_constant_p(n))
20217- copy_to_user_overflow();
20218- else
20219- __copy_to_user_overflow(sz, n);
20220+ if (likely(sz != (size_t)-1 && sz < n)) {
20221+ if(__builtin_constant_p(n))
20222+ copy_to_user_overflow();
20223+ else
20224+ __copy_to_user_overflow(sz, n);
20225+ } else if (access_ok(VERIFY_WRITE, to, n))
20226+ n = __copy_to_user(to, from, n);
20227
20228 return n;
20229 }
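
[annotation] The copy_from_user()/copy_to_user() rework that ends here inverts the stock logic: the compile-time object-size check runs first and refuses the copy outright, access_ok() gates the actual transfer, and a failed read zeroes the destination so stale kernel memory cannot leak back to the caller. A minimal userspace model of that flow, where object_size stands in for __compiletime_object_size() (which yields (size_t)-1 when the destination size is unknown):

    #include <stddef.h>
    #include <string.h>

    #define SIZE_UNKNOWN ((size_t)-1)

    /* Returns the number of bytes NOT copied, like copy_from_user(). */
    static size_t model_copy_from_user(void *to, size_t object_size,
                                       const void *from, size_t n,
                                       int user_range_ok)
    {
        if (object_size != SIZE_UNKNOWN && object_size < n)
            return n;                /* buffer provably too small: copy nothing */
        if (user_range_ok) {
            memcpy(to, from, n);     /* stands in for __copy_from_user() */
            return 0;
        }
        if ((long)n > 0)
            memset(to, 0, n);        /* faulted read: never leak old contents */
        return n;
    }

In the real hunk the "too small" branch additionally calls copy_from_user_overflow(), so constant-size misuse becomes a build-time diagnostic.
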
20230diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20231index 3c03a5d..1071638 100644
20232--- a/arch/x86/include/asm/uaccess_32.h
20233+++ b/arch/x86/include/asm/uaccess_32.h
20234@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20235 static __always_inline unsigned long __must_check
20236 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20237 {
20238+ if ((long)n < 0)
20239+ return n;
20240+
20241+ check_object_size(from, n, true);
20242+
20243 if (__builtin_constant_p(n)) {
20244 unsigned long ret;
20245
20246@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20247 __copy_to_user(void __user *to, const void *from, unsigned long n)
20248 {
20249 might_fault();
20250+
20251 return __copy_to_user_inatomic(to, from, n);
20252 }
20253
20254 static __always_inline unsigned long
20255 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20256 {
20257+ if ((long)n < 0)
20258+ return n;
20259+
20260 /* Avoid zeroing the tail if the copy fails..
20261 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20262 * but as the zeroing behaviour is only significant when n is not
20263@@ -137,6 +146,12 @@ static __always_inline unsigned long
20264 __copy_from_user(void *to, const void __user *from, unsigned long n)
20265 {
20266 might_fault();
20267+
20268+ if ((long)n < 0)
20269+ return n;
20270+
20271+ check_object_size(to, n, false);
20272+
20273 if (__builtin_constant_p(n)) {
20274 unsigned long ret;
20275
20276@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20277 const void __user *from, unsigned long n)
20278 {
20279 might_fault();
20280+
20281+ if ((long)n < 0)
20282+ return n;
20283+
20284 if (__builtin_constant_p(n)) {
20285 unsigned long ret;
20286
20287@@ -181,7 +200,10 @@ static __always_inline unsigned long
20288 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20289 unsigned long n)
20290 {
20291- return __copy_from_user_ll_nocache_nozero(to, from, n);
20292+ if ((long)n < 0)
20293+ return n;
20294+
20295+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20296 }
20297
20298 #endif /* _ASM_X86_UACCESS_32_H */
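
[annotation] The __copy_*_user helpers in uaccess_32.h all gain the same cheap front-line check: a length whose sign bit is set is rejected before any user access or object-size validation happens, which catches size_t values that went "negative" through underflow. A tiny runnable illustration:

    #include <stdio.h>

    static int length_is_sane(unsigned long n)
    {
        return (long)n >= 0;   /* the "(long)n < 0" guard, inverted */
    }

    int main(void)
    {
        unsigned long ok = 16;
        unsigned long bad = 16 - 32;   /* wraps to a huge unsigned value */

        printf("16      -> %s\n", length_is_sane(ok)  ? "accept" : "reject");
        printf("16 - 32 -> %s\n", length_is_sane(bad) ? "accept" : "reject");
        return 0;
    }
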
20299diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20300index 12a26b9..206c200 100644
20301--- a/arch/x86/include/asm/uaccess_64.h
20302+++ b/arch/x86/include/asm/uaccess_64.h
20303@@ -10,6 +10,9 @@
20304 #include <asm/alternative.h>
20305 #include <asm/cpufeature.h>
20306 #include <asm/page.h>
20307+#include <asm/pgtable.h>
20308+
20309+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20310
20311 /*
20312 * Copy To/From Userspace
20313@@ -17,14 +20,14 @@
20314
20315 /* Handles exceptions in both to and from, but doesn't do access_ok */
20316 __must_check unsigned long
20317-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20318+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20319 __must_check unsigned long
20320-copy_user_generic_string(void *to, const void *from, unsigned len);
20321+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20322 __must_check unsigned long
20323-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20324+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20325
20326 static __always_inline __must_check unsigned long
20327-copy_user_generic(void *to, const void *from, unsigned len)
20328+copy_user_generic(void *to, const void *from, unsigned long len)
20329 {
20330 unsigned ret;
20331
20332@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20333 }
20334
20335 __must_check unsigned long
20336-copy_in_user(void __user *to, const void __user *from, unsigned len);
20337+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20338
20339 static __always_inline __must_check
20340-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20341+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20342 {
20343- int ret = 0;
20344+ size_t sz = __compiletime_object_size(dst);
20345+ unsigned ret = 0;
20346+
20347+ if (size > INT_MAX)
20348+ return size;
20349+
20350+ check_object_size(dst, size, false);
20351+
20352+#ifdef CONFIG_PAX_MEMORY_UDEREF
20353+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20354+ return size;
20355+#endif
20356+
20357+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20358+ if(__builtin_constant_p(size))
20359+ copy_from_user_overflow();
20360+ else
20361+ __copy_from_user_overflow(sz, size);
20362+ return size;
20363+ }
20364
20365 if (!__builtin_constant_p(size))
20366- return copy_user_generic(dst, (__force void *)src, size);
20367+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20368 switch (size) {
20369- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20370+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20371 ret, "b", "b", "=q", 1);
20372 return ret;
20373- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20374+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20375 ret, "w", "w", "=r", 2);
20376 return ret;
20377- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20378+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20379 ret, "l", "k", "=r", 4);
20380 return ret;
20381- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20382+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20383 ret, "q", "", "=r", 8);
20384 return ret;
20385 case 10:
20386- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20387+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20388 ret, "q", "", "=r", 10);
20389 if (unlikely(ret))
20390 return ret;
20391 __get_user_asm(*(u16 *)(8 + (char *)dst),
20392- (u16 __user *)(8 + (char __user *)src),
20393+ (const u16 __user *)(8 + (const char __user *)src),
20394 ret, "w", "w", "=r", 2);
20395 return ret;
20396 case 16:
20397- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20398+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20399 ret, "q", "", "=r", 16);
20400 if (unlikely(ret))
20401 return ret;
20402 __get_user_asm(*(u64 *)(8 + (char *)dst),
20403- (u64 __user *)(8 + (char __user *)src),
20404+ (const u64 __user *)(8 + (const char __user *)src),
20405 ret, "q", "", "=r", 8);
20406 return ret;
20407 default:
20408- return copy_user_generic(dst, (__force void *)src, size);
20409+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20410 }
20411 }
20412
20413 static __always_inline __must_check
20414-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20415+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20416 {
20417 might_fault();
20418 return __copy_from_user_nocheck(dst, src, size);
20419 }
20420
20421 static __always_inline __must_check
20422-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20423+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20424 {
20425- int ret = 0;
20426+ size_t sz = __compiletime_object_size(src);
20427+ unsigned ret = 0;
20428+
20429+ if (size > INT_MAX)
20430+ return size;
20431+
20432+ check_object_size(src, size, true);
20433+
20434+#ifdef CONFIG_PAX_MEMORY_UDEREF
20435+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20436+ return size;
20437+#endif
20438+
20439+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20440+ if(__builtin_constant_p(size))
20441+ copy_to_user_overflow();
20442+ else
20443+ __copy_to_user_overflow(sz, size);
20444+ return size;
20445+ }
20446
20447 if (!__builtin_constant_p(size))
20448- return copy_user_generic((__force void *)dst, src, size);
20449+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20450 switch (size) {
20451- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20452+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20453 ret, "b", "b", "iq", 1);
20454 return ret;
20455- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20456+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20457 ret, "w", "w", "ir", 2);
20458 return ret;
20459- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20460+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20461 ret, "l", "k", "ir", 4);
20462 return ret;
20463- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20464+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20465 ret, "q", "", "er", 8);
20466 return ret;
20467 case 10:
20468- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20469+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20470 ret, "q", "", "er", 10);
20471 if (unlikely(ret))
20472 return ret;
20473 asm("":::"memory");
20474- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20475+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20476 ret, "w", "w", "ir", 2);
20477 return ret;
20478 case 16:
20479- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20480+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20481 ret, "q", "", "er", 16);
20482 if (unlikely(ret))
20483 return ret;
20484 asm("":::"memory");
20485- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20486+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20487 ret, "q", "", "er", 8);
20488 return ret;
20489 default:
20490- return copy_user_generic((__force void *)dst, src, size);
20491+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20492 }
20493 }
20494
20495 static __always_inline __must_check
20496-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20497+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20498 {
20499 might_fault();
20500 return __copy_to_user_nocheck(dst, src, size);
20501 }
20502
20503 static __always_inline __must_check
20504-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20505+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20506 {
20507- int ret = 0;
20508+ unsigned ret = 0;
20509
20510 might_fault();
20511+
20512+ if (size > INT_MAX)
20513+ return size;
20514+
20515+#ifdef CONFIG_PAX_MEMORY_UDEREF
20516+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20517+ return size;
20518+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20519+ return size;
20520+#endif
20521+
20522 if (!__builtin_constant_p(size))
20523- return copy_user_generic((__force void *)dst,
20524- (__force void *)src, size);
20525+ return copy_user_generic((__force_kernel void *)____m(dst),
20526+ (__force_kernel const void *)____m(src), size);
20527 switch (size) {
20528 case 1: {
20529 u8 tmp;
20530- __get_user_asm(tmp, (u8 __user *)src,
20531+ __get_user_asm(tmp, (const u8 __user *)src,
20532 ret, "b", "b", "=q", 1);
20533 if (likely(!ret))
20534 __put_user_asm(tmp, (u8 __user *)dst,
20535@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20536 }
20537 case 2: {
20538 u16 tmp;
20539- __get_user_asm(tmp, (u16 __user *)src,
20540+ __get_user_asm(tmp, (const u16 __user *)src,
20541 ret, "w", "w", "=r", 2);
20542 if (likely(!ret))
20543 __put_user_asm(tmp, (u16 __user *)dst,
20544@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20545
20546 case 4: {
20547 u32 tmp;
20548- __get_user_asm(tmp, (u32 __user *)src,
20549+ __get_user_asm(tmp, (const u32 __user *)src,
20550 ret, "l", "k", "=r", 4);
20551 if (likely(!ret))
20552 __put_user_asm(tmp, (u32 __user *)dst,
20553@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20554 }
20555 case 8: {
20556 u64 tmp;
20557- __get_user_asm(tmp, (u64 __user *)src,
20558+ __get_user_asm(tmp, (const u64 __user *)src,
20559 ret, "q", "", "=r", 8);
20560 if (likely(!ret))
20561 __put_user_asm(tmp, (u64 __user *)dst,
20562@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20563 return ret;
20564 }
20565 default:
20566- return copy_user_generic((__force void *)dst,
20567- (__force void *)src, size);
20568+ return copy_user_generic((__force_kernel void *)____m(dst),
20569+ (__force_kernel const void *)____m(src), size);
20570 }
20571 }
20572
20573-static __must_check __always_inline int
20574-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20575+static __must_check __always_inline unsigned long
20576+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20577 {
20578 return __copy_from_user_nocheck(dst, src, size);
20579 }
20580
20581-static __must_check __always_inline int
20582-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20583+static __must_check __always_inline unsigned long
20584+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20585 {
20586 return __copy_to_user_nocheck(dst, src, size);
20587 }
20588
20589-extern long __copy_user_nocache(void *dst, const void __user *src,
20590- unsigned size, int zerorest);
20591+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20592+ unsigned long size, int zerorest);
20593
20594-static inline int
20595-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20596+static inline unsigned long
20597+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20598 {
20599 might_fault();
20600+
20601+ if (size > INT_MAX)
20602+ return size;
20603+
20604+#ifdef CONFIG_PAX_MEMORY_UDEREF
20605+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20606+ return size;
20607+#endif
20608+
20609 return __copy_user_nocache(dst, src, size, 1);
20610 }
20611
20612-static inline int
20613+static inline unsigned long
20614 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20615- unsigned size)
20616+ unsigned long size)
20617 {
20618+ if (size > INT_MAX)
20619+ return size;
20620+
20621+#ifdef CONFIG_PAX_MEMORY_UDEREF
20622+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20623+ return size;
20624+#endif
20625+
20626 return __copy_user_nocache(dst, src, size, 0);
20627 }
20628
20629 unsigned long
20630-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20631+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20632
20633 #endif /* _ASM_X86_UACCESS_64_H */
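
[annotation] On the 64-bit side the same hardening shows up as "size > INT_MAX" guards, widened unsigned long return types, and UDEREF rebasing of every raw user pointer through ____m() before the unchecked copy routines touch it. The rebasing itself (defined in the uaccess.h hunk earlier) just lifts low userland addresses into a kernel-visible shadow mapping; a userspace model with a made-up base value:

    #include <stdio.h>

    /* Illustrative only; the real pax_user_shadow_base is a kernel variable. */
    static unsigned long pax_user_shadow_base = 0x1000000000000UL;

    static unsigned long rebase_user_ptr(unsigned long p)
    {
        if (p < pax_user_shadow_base)
            p += pax_user_shadow_base;   /* mirrors the ____m() macro */
        return p;
    }

    int main(void)
    {
        unsigned long uptr = 0x7f0000001000UL;
        printf("%#lx -> %#lx\n", uptr, rebase_user_ptr(uptr));
        return 0;
    }
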
20634diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20635index 5b238981..77fdd78 100644
20636--- a/arch/x86/include/asm/word-at-a-time.h
20637+++ b/arch/x86/include/asm/word-at-a-time.h
20638@@ -11,7 +11,7 @@
20639 * and shift, for example.
20640 */
20641 struct word_at_a_time {
20642- const unsigned long one_bits, high_bits;
20643+ unsigned long one_bits, high_bits;
20644 };
20645
20646 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20647diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20648index e45e4da..44e8572 100644
20649--- a/arch/x86/include/asm/x86_init.h
20650+++ b/arch/x86/include/asm/x86_init.h
20651@@ -129,7 +129,7 @@ struct x86_init_ops {
20652 struct x86_init_timers timers;
20653 struct x86_init_iommu iommu;
20654 struct x86_init_pci pci;
20655-};
20656+} __no_const;
20657
20658 /**
20659 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20660@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20661 void (*setup_percpu_clockev)(void);
20662 void (*early_percpu_clock_init)(void);
20663 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20664-};
20665+} __no_const;
20666
20667 struct timespec;
20668
20669@@ -168,7 +168,7 @@ struct x86_platform_ops {
20670 void (*save_sched_clock_state)(void);
20671 void (*restore_sched_clock_state)(void);
20672 void (*apic_post_init)(void);
20673-};
20674+} __no_const;
20675
20676 struct pci_dev;
20677 struct msi_msg;
20678@@ -185,7 +185,7 @@ struct x86_msi_ops {
20679 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20680 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
20681 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
20682-};
20683+} __no_const;
20684
20685 struct IO_APIC_route_entry;
20686 struct io_apic_irq_attr;
20687@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
20688 unsigned int destination, int vector,
20689 struct io_apic_irq_attr *attr);
20690 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20691-};
20692+} __no_const;
20693
20694 extern struct x86_init_ops x86_init;
20695 extern struct x86_cpuinit_ops x86_cpuinit;
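
[annotation] The x86_init ops tables collect nothing but function pointers, which grsecurity's constify gcc plugin would normally force const and move to read-only memory; because the kernel legitimately fills these structures in at runtime during platform setup, the patch opts them out with __no_const. A plain-C sketch of the idea (the attribute is plugin-provided, so it is defined away here):

    #include <stdio.h>

    #ifndef __no_const
    #define __no_const   /* supplied by the constify plugin in a grsec build */
    #endif

    struct demo_ops {
        void (*init)(void);
    } __no_const;

    static void quirk_init(void) { puts("platform quirk init"); }

    static struct demo_ops demo;   /* must stay writable */

    int main(void)
    {
        demo.init = quirk_init;    /* runtime assignment is the whole point */
        demo.init();
        return 0;
    }
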
20696diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20697index c949923..c22bfa4 100644
20698--- a/arch/x86/include/asm/xen/page.h
20699+++ b/arch/x86/include/asm/xen/page.h
20700@@ -63,7 +63,7 @@ extern int m2p_remove_override(struct page *page,
20701 extern struct page *m2p_find_override(unsigned long mfn);
20702 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
20703
20704-static inline unsigned long pfn_to_mfn(unsigned long pfn)
20705+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
20706 {
20707 unsigned long mfn;
20708
20709diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20710index d949ef2..479b9d1 100644
20711--- a/arch/x86/include/asm/xsave.h
20712+++ b/arch/x86/include/asm/xsave.h
20713@@ -82,8 +82,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20714 if (unlikely(err))
20715 return -EFAULT;
20716
20717+ pax_open_userland();
20718 __asm__ __volatile__(ASM_STAC "\n"
20719- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
20720+ "1:"
20721+ __copyuser_seg
20722+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
20723 "2: " ASM_CLAC "\n"
20724 ".section .fixup,\"ax\"\n"
20725 "3: movl $-1,%[err]\n"
20726@@ -93,18 +96,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20727 : [err] "=r" (err)
20728 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20729 : "memory");
20730+ pax_close_userland();
20731 return err;
20732 }
20733
20734 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20735 {
20736 int err;
20737- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20738+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20739 u32 lmask = mask;
20740 u32 hmask = mask >> 32;
20741
20742+ pax_open_userland();
20743 __asm__ __volatile__(ASM_STAC "\n"
20744- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20745+ "1:"
20746+ __copyuser_seg
20747+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20748 "2: " ASM_CLAC "\n"
20749 ".section .fixup,\"ax\"\n"
20750 "3: movl $-1,%[err]\n"
20751@@ -114,6 +121,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20752 : [err] "=r" (err)
20753 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20754 : "memory"); /* memory required? */
20755+ pax_close_userland();
20756 return err;
20757 }
20758
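
[annotation] The xsave/xrstor hunks show the pattern this patch applies to every piece of inline asm that dereferences userland: the access is bracketed by pax_open_userland()/pax_close_userland(), and the memory operand is prefixed with __copyuser_seg, which on i386 UDEREF is (to my understanding) a "gs;" segment override selecting the userland segment, and expands to nothing elsewhere. An illustrative reconstruction that omits the exception-table fixup shown in the real hunks:

    #ifdef CONFIG_PAX_MEMORY_UDEREF
    # define __copyuser_seg "gs;"   /* assumed i386-UDEREF expansion */
    #else
    # define __copyuser_seg
    #endif

    static unsigned char get_user_u8_demo(const unsigned char *uaddr)
    {
        unsigned char x;
        /* pax_open_userland() would go here in the real pattern */
        asm volatile("1: " __copyuser_seg "movb %1,%0\n"
                     "2:\n"
                     : "=q" (x)
                     : "m" (*uaddr));
        /* pax_close_userland() here */
        return x;
    }
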
20759diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20760index bbae024..e1528f9 100644
20761--- a/arch/x86/include/uapi/asm/e820.h
20762+++ b/arch/x86/include/uapi/asm/e820.h
20763@@ -63,7 +63,7 @@ struct e820map {
20764 #define ISA_START_ADDRESS 0xa0000
20765 #define ISA_END_ADDRESS 0x100000
20766
20767-#define BIOS_BEGIN 0x000a0000
20768+#define BIOS_BEGIN 0x000c0000
20769 #define BIOS_END 0x00100000
20770
20771 #define BIOS_ROM_BASE 0xffe00000
20772diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20773index 7b0a55a..ad115bf 100644
20774--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20775+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20776@@ -49,7 +49,6 @@
20777 #define EFLAGS 144
20778 #define RSP 152
20779 #define SS 160
20780-#define ARGOFFSET R11
20781 #endif /* __ASSEMBLY__ */
20782
20783 /* top of stack page */
20784diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20785index 047f9ff..4ba5ea6 100644
20786--- a/arch/x86/kernel/Makefile
20787+++ b/arch/x86/kernel/Makefile
20788@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20789 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20790 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20791 obj-y += probe_roms.o
20792-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20793+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20794 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20795 obj-$(CONFIG_X86_64) += mcount_64.o
20796 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20797diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20798index 86281ff..e046fc2 100644
20799--- a/arch/x86/kernel/acpi/boot.c
20800+++ b/arch/x86/kernel/acpi/boot.c
20801@@ -1296,7 +1296,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20802 * If your system is blacklisted here, but you find that acpi=force
20803 * works for you, please contact linux-acpi@vger.kernel.org
20804 */
20805-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20806+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20807 /*
20808 * Boxes that need ACPI disabled
20809 */
20810@@ -1371,7 +1371,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20811 };
20812
20813 /* second table for DMI checks that should run after early-quirks */
20814-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20815+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20816 /*
20817 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20818 * which includes some code which overrides all temperature
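
[annotation] The acpi_dmi_table hunks above are plain constification: quirk tables that are only ever read move from __initdata to const __initconst, so they land in read-only init memory. The shape of the pattern, with illustrative fields:

    struct dmi_quirk {
        const char *ident;
        int (*callback)(void);
    };

    static const struct dmi_quirk quirks[] /* __initconst in-kernel */ = {
        { "Example board", 0 },   /* hypothetical entry */
    };
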
20819diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20820index 3136820..e2c6577 100644
20821--- a/arch/x86/kernel/acpi/sleep.c
20822+++ b/arch/x86/kernel/acpi/sleep.c
20823@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20824 #else /* CONFIG_64BIT */
20825 #ifdef CONFIG_SMP
20826 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20827+
20828+ pax_open_kernel();
20829 early_gdt_descr.address =
20830 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20831+ pax_close_kernel();
20832+
20833 initial_gs = per_cpu_offset(smp_processor_id());
20834 #endif
20835 initial_code = (unsigned long)wakeup_long64;
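
[annotation] The pax_open_kernel()/pax_close_kernel() pair added around the early_gdt_descr update is the KERNEXEC write-window pattern that recurs through the rest of this patch (GDT edits in apm_32.c, instruction patching in alternative.c): data that is normally mapped read-only gets a short, explicit writable window. My understanding is that on x86 this toggles CR0.WP; the sketch below only shapes the pattern and is not the PaX implementation:

    static inline void pax_open_kernel(void)  { /* e.g. clear CR0.WP */ }
    static inline void pax_close_kernel(void) { /* restore CR0.WP */ }

    static unsigned long early_gdt_address;   /* stands in for early_gdt_descr.address */

    static void update_gdt_descr(unsigned long addr)
    {
        pax_open_kernel();
        early_gdt_address = addr;   /* write to otherwise read-only data */
        pax_close_kernel();
    }
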
20836diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20837index 665c6b7..eae4d56 100644
20838--- a/arch/x86/kernel/acpi/wakeup_32.S
20839+++ b/arch/x86/kernel/acpi/wakeup_32.S
20840@@ -29,13 +29,11 @@ wakeup_pmode_return:
20841 # and restore the stack ... but you need gdt for this to work
20842 movl saved_context_esp, %esp
20843
20844- movl %cs:saved_magic, %eax
20845- cmpl $0x12345678, %eax
20846+ cmpl $0x12345678, saved_magic
20847 jne bogus_magic
20848
20849 # jump to place where we left off
20850- movl saved_eip, %eax
20851- jmp *%eax
20852+ jmp *(saved_eip)
20853
20854 bogus_magic:
20855 jmp bogus_magic
20856diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20857index 703130f..27a155d 100644
20858--- a/arch/x86/kernel/alternative.c
20859+++ b/arch/x86/kernel/alternative.c
20860@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20861 */
20862 for (a = start; a < end; a++) {
20863 instr = (u8 *)&a->instr_offset + a->instr_offset;
20864+
20865+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20866+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20867+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20868+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20869+#endif
20870+
20871 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20872 BUG_ON(a->replacementlen > a->instrlen);
20873 BUG_ON(a->instrlen > sizeof(insnbuf));
20874@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20875 add_nops(insnbuf + a->replacementlen,
20876 a->instrlen - a->replacementlen);
20877
20878+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20879+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20880+ instr = ktva_ktla(instr);
20881+#endif
20882+
20883 text_poke_early(instr, insnbuf, a->instrlen);
20884 }
20885 }
20886@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20887 for (poff = start; poff < end; poff++) {
20888 u8 *ptr = (u8 *)poff + *poff;
20889
20890+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20891+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20892+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20893+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20894+#endif
20895+
20896 if (!*poff || ptr < text || ptr >= text_end)
20897 continue;
20898 /* turn DS segment override prefix into lock prefix */
20899- if (*ptr == 0x3e)
20900+ if (*ktla_ktva(ptr) == 0x3e)
20901 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20902 }
20903 mutex_unlock(&text_mutex);
20904@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20905 for (poff = start; poff < end; poff++) {
20906 u8 *ptr = (u8 *)poff + *poff;
20907
20908+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20909+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20910+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20911+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20912+#endif
20913+
20914 if (!*poff || ptr < text || ptr >= text_end)
20915 continue;
20916 /* turn lock prefix into DS segment override prefix */
20917- if (*ptr == 0xf0)
20918+ if (*ktla_ktva(ptr) == 0xf0)
20919 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20920 }
20921 mutex_unlock(&text_mutex);
20922@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20923
20924 BUG_ON(p->len > MAX_PATCH_LEN);
20925 /* prep the buffer with the original instructions */
20926- memcpy(insnbuf, p->instr, p->len);
20927+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20928 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20929 (unsigned long)p->instr, p->len);
20930
20931@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20932 if (!uniproc_patched || num_possible_cpus() == 1)
20933 free_init_pages("SMP alternatives",
20934 (unsigned long)__smp_locks,
20935- (unsigned long)__smp_locks_end);
20936+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20937 #endif
20938
20939 apply_paravirt(__parainstructions, __parainstructions_end);
20940@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20941 * instructions. And on the local CPU you need to be protected again NMI or MCE
20942 * handlers seeing an inconsistent instruction while you patch.
20943 */
20944-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20945+void *__kprobes text_poke_early(void *addr, const void *opcode,
20946 size_t len)
20947 {
20948 unsigned long flags;
20949 local_irq_save(flags);
20950- memcpy(addr, opcode, len);
20951+
20952+ pax_open_kernel();
20953+ memcpy(ktla_ktva(addr), opcode, len);
20954 sync_core();
20955+ pax_close_kernel();
20956+
20957 local_irq_restore(flags);
20958 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20959 that causes hangs on some VIA CPUs. */
20960@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20961 */
20962 void *text_poke(void *addr, const void *opcode, size_t len)
20963 {
20964- unsigned long flags;
20965- char *vaddr;
20966+ unsigned char *vaddr = ktla_ktva(addr);
20967 struct page *pages[2];
20968- int i;
20969+ size_t i;
20970
20971 if (!core_kernel_text((unsigned long)addr)) {
20972- pages[0] = vmalloc_to_page(addr);
20973- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20974+ pages[0] = vmalloc_to_page(vaddr);
20975+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20976 } else {
20977- pages[0] = virt_to_page(addr);
20978+ pages[0] = virt_to_page(vaddr);
20979 WARN_ON(!PageReserved(pages[0]));
20980- pages[1] = virt_to_page(addr + PAGE_SIZE);
20981+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20982 }
20983 BUG_ON(!pages[0]);
20984- local_irq_save(flags);
20985- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20986- if (pages[1])
20987- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20988- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20989- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20990- clear_fixmap(FIX_TEXT_POKE0);
20991- if (pages[1])
20992- clear_fixmap(FIX_TEXT_POKE1);
20993- local_flush_tlb();
20994- sync_core();
20995- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20996- that causes hangs on some VIA CPUs. */
20997+ text_poke_early(addr, opcode, len);
20998 for (i = 0; i < len; i++)
20999- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
21000- local_irq_restore(flags);
21001+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
21002 return addr;
21003 }
21004
21005@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
21006 if (likely(!bp_patching_in_progress))
21007 return 0;
21008
21009- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
21010+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
21011 return 0;
21012
21013 /* set up the specified breakpoint handler */
21014@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
21015 */
21016 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
21017 {
21018- unsigned char int3 = 0xcc;
21019+ const unsigned char int3 = 0xcc;
21020
21021 bp_int3_handler = handler;
21022 bp_int3_addr = (u8 *)addr + sizeof(int3);
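
[annotation] The alternative.c changes funnel all runtime patching through text_poke_early(), which now opens a pax_open_kernel() window and writes through ktla_ktva(addr) instead of mapping the page into a fixmap. Under i386 KERNEXEC the kernel text is reachable through two mappings, and ktla_ktva()/ktva_ktla() translate between the logical (executable) view and the writable alias by a fixed delta. A sketch with a hypothetical delta, not the real layout:

    /* KTLA_KTVA_DELTA is invented for illustration. */
    #define KTLA_KTVA_DELTA 0x01000000UL

    #define ktla_ktva(addr) ((void *)((unsigned long)(addr) + KTLA_KTVA_DELTA))
    #define ktva_ktla(addr) ((void *)((unsigned long)(addr) - KTLA_KTVA_DELTA))
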
21023diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
21024index ad28db7..c538b2c 100644
21025--- a/arch/x86/kernel/apic/apic.c
21026+++ b/arch/x86/kernel/apic/apic.c
21027@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
21028 /*
21029 * Debug level, exported for io_apic.c
21030 */
21031-unsigned int apic_verbosity;
21032+int apic_verbosity;
21033
21034 int pic_mode;
21035
21036@@ -2000,7 +2000,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
21037 apic_write(APIC_ESR, 0);
21038 v = apic_read(APIC_ESR);
21039 ack_APIC_irq();
21040- atomic_inc(&irq_err_count);
21041+ atomic_inc_unchecked(&irq_err_count);
21042
21043 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
21044 smp_processor_id(), v);
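
[annotation] Switching irq_err_count (and, just below in io_apic.c, irq_mis_count) to atomic_inc_unchecked() is part of the REFCOUNT hardening: ordinary atomic_t operations trap on overflow in a grsec kernel, so statistics counters where wrapping is harmless are moved to the unchecked variants. A minimal model of the distinction, with the type shaped like the kernel's:

    typedef struct { volatile int counter; } atomic_unchecked_t;

    /* Plain add: allowed to wrap, no overflow trap. */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }
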
21045diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
21046index 7c1b294..e71d27f 100644
21047--- a/arch/x86/kernel/apic/apic_flat_64.c
21048+++ b/arch/x86/kernel/apic/apic_flat_64.c
21049@@ -154,7 +154,7 @@ static int flat_probe(void)
21050 return 1;
21051 }
21052
21053-static struct apic apic_flat = {
21054+static struct apic apic_flat __read_only = {
21055 .name = "flat",
21056 .probe = flat_probe,
21057 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
21058@@ -268,7 +268,7 @@ static int physflat_probe(void)
21059 return 0;
21060 }
21061
21062-static struct apic apic_physflat = {
21063+static struct apic apic_physflat __read_only = {
21064
21065 .name = "physical flat",
21066 .probe = physflat_probe,
21067diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
21068index 8c7c982..a225910 100644
21069--- a/arch/x86/kernel/apic/apic_noop.c
21070+++ b/arch/x86/kernel/apic/apic_noop.c
21071@@ -118,7 +118,7 @@ static void noop_apic_write(u32 reg, u32 v)
21072 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21073 }
21074
21075-struct apic apic_noop = {
21076+struct apic apic_noop __read_only = {
21077 .name = "noop",
21078 .probe = noop_probe,
21079 .acpi_madt_oem_check = NULL,
21080diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21081index e4840aa..e7d9dac 100644
21082--- a/arch/x86/kernel/apic/bigsmp_32.c
21083+++ b/arch/x86/kernel/apic/bigsmp_32.c
21084@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
21085 return dmi_bigsmp;
21086 }
21087
21088-static struct apic apic_bigsmp = {
21089+static struct apic apic_bigsmp __read_only = {
21090
21091 .name = "bigsmp",
21092 .probe = probe_bigsmp,
21093diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21094index 81e08ef..abc77e5 100644
21095--- a/arch/x86/kernel/apic/io_apic.c
21096+++ b/arch/x86/kernel/apic/io_apic.c
21097@@ -1042,7 +1042,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
21098 }
21099 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21100
21101-void lock_vector_lock(void)
21102+void lock_vector_lock(void) __acquires(vector_lock)
21103 {
21104 /* Used to the online set of cpus does not change
21105 * during assign_irq_vector.
21106@@ -1050,7 +1050,7 @@ void lock_vector_lock(void)
21107 raw_spin_lock(&vector_lock);
21108 }
21109
21110-void unlock_vector_lock(void)
21111+void unlock_vector_lock(void) __releases(vector_lock)
21112 {
21113 raw_spin_unlock(&vector_lock);
21114 }
21115@@ -2349,7 +2349,7 @@ static void ack_apic_edge(struct irq_data *data)
21116 ack_APIC_irq();
21117 }
21118
21119-atomic_t irq_mis_count;
21120+atomic_unchecked_t irq_mis_count;
21121
21122 #ifdef CONFIG_GENERIC_PENDING_IRQ
21123 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21124@@ -2490,7 +2490,7 @@ static void ack_apic_level(struct irq_data *data)
21125 * at the cpu.
21126 */
21127 if (!(v & (1 << (i & 0x1f)))) {
21128- atomic_inc(&irq_mis_count);
21129+ atomic_inc_unchecked(&irq_mis_count);
21130
21131 eoi_ioapic_irq(irq, cfg);
21132 }
21133diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21134index cceb352..a635fd8 100644
21135--- a/arch/x86/kernel/apic/probe_32.c
21136+++ b/arch/x86/kernel/apic/probe_32.c
21137@@ -72,7 +72,7 @@ static int probe_default(void)
21138 return 1;
21139 }
21140
21141-static struct apic apic_default = {
21142+static struct apic apic_default __read_only = {
21143
21144 .name = "default",
21145 .probe = probe_default,
21146diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21147index e66766b..1c008ba 100644
21148--- a/arch/x86/kernel/apic/x2apic_cluster.c
21149+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21150@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21151 return notifier_from_errno(err);
21152 }
21153
21154-static struct notifier_block __refdata x2apic_cpu_notifier = {
21155+static struct notifier_block x2apic_cpu_notifier = {
21156 .notifier_call = update_clusterinfo,
21157 };
21158
21159@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21160 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21161 }
21162
21163-static struct apic apic_x2apic_cluster = {
21164+static struct apic apic_x2apic_cluster __read_only = {
21165
21166 .name = "cluster x2apic",
21167 .probe = x2apic_cluster_probe,
21168diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21169index 6d600eb..0300c00 100644
21170--- a/arch/x86/kernel/apic/x2apic_phys.c
21171+++ b/arch/x86/kernel/apic/x2apic_phys.c
21172@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21173 return apic == &apic_x2apic_phys;
21174 }
21175
21176-static struct apic apic_x2apic_phys = {
21177+static struct apic apic_x2apic_phys __read_only = {
21178
21179 .name = "physical x2apic",
21180 .probe = x2apic_phys_probe,
21181diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21182index 293b41d..4df25fd 100644
21183--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21184+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21185@@ -350,7 +350,7 @@ static int uv_probe(void)
21186 return apic == &apic_x2apic_uv_x;
21187 }
21188
21189-static struct apic __refdata apic_x2apic_uv_x = {
21190+static struct apic apic_x2apic_uv_x __read_only = {
21191
21192 .name = "UV large system",
21193 .probe = uv_probe,
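
[annotation] Every apic driver structure in this run of hunks (flat, physflat, noop, bigsmp, default, the x2apic variants) is marked __read_only, so the function-pointer tables move into memory that is mapped read-only after init and cannot be redirected at runtime; the __refdata annotation on apic_x2apic_uv_x is dropped for the same reason. The attribute is grsecurity-specific; a stand-in expansion might look like:

    #define __read_only __attribute__((section(".data..read_only")))  /* assumed name */

    struct demo_driver {
        const char *name;
        int (*probe)(void);
    };

    static int demo_probe(void) { return 1; }

    static struct demo_driver demo_apic __read_only = {
        .name  = "demo",
        .probe = demo_probe,
    };
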
21194diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21195index 5848744..56cb598 100644
21196--- a/arch/x86/kernel/apm_32.c
21197+++ b/arch/x86/kernel/apm_32.c
21198@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21199 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21200 * even though they are called in protected mode.
21201 */
21202-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21203+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21204 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21205
21206 static const char driver_version[] = "1.16ac"; /* no spaces */
21207@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21208 BUG_ON(cpu != 0);
21209 gdt = get_cpu_gdt_table(cpu);
21210 save_desc_40 = gdt[0x40 / 8];
21211+
21212+ pax_open_kernel();
21213 gdt[0x40 / 8] = bad_bios_desc;
21214+ pax_close_kernel();
21215
21216 apm_irq_save(flags);
21217 APM_DO_SAVE_SEGS;
21218@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21219 &call->esi);
21220 APM_DO_RESTORE_SEGS;
21221 apm_irq_restore(flags);
21222+
21223+ pax_open_kernel();
21224 gdt[0x40 / 8] = save_desc_40;
21225+ pax_close_kernel();
21226+
21227 put_cpu();
21228
21229 return call->eax & 0xff;
21230@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21231 BUG_ON(cpu != 0);
21232 gdt = get_cpu_gdt_table(cpu);
21233 save_desc_40 = gdt[0x40 / 8];
21234+
21235+ pax_open_kernel();
21236 gdt[0x40 / 8] = bad_bios_desc;
21237+ pax_close_kernel();
21238
21239 apm_irq_save(flags);
21240 APM_DO_SAVE_SEGS;
21241@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21242 &call->eax);
21243 APM_DO_RESTORE_SEGS;
21244 apm_irq_restore(flags);
21245+
21246+ pax_open_kernel();
21247 gdt[0x40 / 8] = save_desc_40;
21248+ pax_close_kernel();
21249+
21250 put_cpu();
21251 return error;
21252 }
21253@@ -2350,12 +2364,15 @@ static int __init apm_init(void)
21254 * code to that CPU.
21255 */
21256 gdt = get_cpu_gdt_table(0);
21257+
21258+ pax_open_kernel();
21259 set_desc_base(&gdt[APM_CS >> 3],
21260 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21261 set_desc_base(&gdt[APM_CS_16 >> 3],
21262 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21263 set_desc_base(&gdt[APM_DS >> 3],
21264 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21265+ pax_close_kernel();
21266
21267 proc_create("apm", 0, NULL, &apm_file_ops);
21268
21269diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21270index 9f6b934..cf5ffb3 100644
21271--- a/arch/x86/kernel/asm-offsets.c
21272+++ b/arch/x86/kernel/asm-offsets.c
21273@@ -32,6 +32,8 @@ void common(void) {
21274 OFFSET(TI_flags, thread_info, flags);
21275 OFFSET(TI_status, thread_info, status);
21276 OFFSET(TI_addr_limit, thread_info, addr_limit);
21277+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21278+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21279
21280 BLANK();
21281 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21282@@ -52,8 +54,26 @@ void common(void) {
21283 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21284 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21285 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21286+
21287+#ifdef CONFIG_PAX_KERNEXEC
21288+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21289 #endif
21290
21291+#ifdef CONFIG_PAX_MEMORY_UDEREF
21292+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21293+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21294+#ifdef CONFIG_X86_64
21295+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21296+#endif
21297+#endif
21298+
21299+#endif
21300+
21301+ BLANK();
21302+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21303+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21304+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21305+
21306 #ifdef CONFIG_XEN
21307 BLANK();
21308 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21309diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21310index e7c798b..2b2019b 100644
21311--- a/arch/x86/kernel/asm-offsets_64.c
21312+++ b/arch/x86/kernel/asm-offsets_64.c
21313@@ -77,6 +77,7 @@ int main(void)
21314 BLANK();
21315 #undef ENTRY
21316
21317+ DEFINE(TSS_size, sizeof(struct tss_struct));
21318 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21319 BLANK();
21320
21321diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21322index 7fd54f0..0691410 100644
21323--- a/arch/x86/kernel/cpu/Makefile
21324+++ b/arch/x86/kernel/cpu/Makefile
21325@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21326 CFLAGS_REMOVE_perf_event.o = -pg
21327 endif
21328
21329-# Make sure load_percpu_segment has no stackprotector
21330-nostackp := $(call cc-option, -fno-stack-protector)
21331-CFLAGS_common.o := $(nostackp)
21332-
21333 obj-y := intel_cacheinfo.o scattered.o topology.o
21334 obj-y += proc.o capflags.o powerflags.o common.o
21335 obj-y += rdrand.o
21336diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21337index ce8b8ff..d7d8851 100644
21338--- a/arch/x86/kernel/cpu/amd.c
21339+++ b/arch/x86/kernel/cpu/amd.c
21340@@ -728,7 +728,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21341 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21342 {
21343 /* AMD errata T13 (order #21922) */
21344- if ((c->x86 == 6)) {
21345+ if (c->x86 == 6) {
21346 /* Duron Rev A0 */
21347 if (c->x86_model == 3 && c->x86_mask == 0)
21348 size = 64;
21349diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21350index ef1b93f..150db65 100644
21351--- a/arch/x86/kernel/cpu/common.c
21352+++ b/arch/x86/kernel/cpu/common.c
21353@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21354
21355 static const struct cpu_dev *this_cpu = &default_cpu;
21356
21357-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21358-#ifdef CONFIG_X86_64
21359- /*
21360- * We need valid kernel segments for data and code in long mode too
21361- * IRET will check the segment types kkeil 2000/10/28
21362- * Also sysret mandates a special GDT layout
21363- *
21364- * TLS descriptors are currently at a different place compared to i386.
21365- * Hopefully nobody expects them at a fixed place (Wine?)
21366- */
21367- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21368- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21369- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21370- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21371- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21372- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21373-#else
21374- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21375- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21376- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21377- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21378- /*
21379- * Segments used for calling PnP BIOS have byte granularity.
21380- * They code segments and data segments have fixed 64k limits,
21381- * the transfer segment sizes are set at run time.
21382- */
21383- /* 32-bit code */
21384- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21385- /* 16-bit code */
21386- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21387- /* 16-bit data */
21388- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21389- /* 16-bit data */
21390- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21391- /* 16-bit data */
21392- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21393- /*
21394- * The APM segments have byte granularity and their bases
21395- * are set at run time. All have 64k limits.
21396- */
21397- /* 32-bit code */
21398- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21399- /* 16-bit code */
21400- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21401- /* data */
21402- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21403-
21404- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21405- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21406- GDT_STACK_CANARY_INIT
21407-#endif
21408-} };
21409-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21410-
21411 static int __init x86_xsave_setup(char *s)
21412 {
21413 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
21414@@ -295,6 +241,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21415 }
21416 }
21417
21418+#ifdef CONFIG_X86_64
21419+static __init int setup_disable_pcid(char *arg)
21420+{
21421+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21422+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21423+
21424+#ifdef CONFIG_PAX_MEMORY_UDEREF
21425+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21426+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21427+#endif
21428+
21429+ return 1;
21430+}
21431+__setup("nopcid", setup_disable_pcid);
21432+
21433+static void setup_pcid(struct cpuinfo_x86 *c)
21434+{
21435+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21436+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21437+
21438+#ifdef CONFIG_PAX_MEMORY_UDEREF
21439+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21440+ pax_open_kernel();
21441+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21442+ pax_close_kernel();
21443+ printk("PAX: slow and weak UDEREF enabled\n");
21444+ } else
21445+ printk("PAX: UDEREF disabled\n");
21446+#endif
21447+
21448+ return;
21449+ }
21450+
21451+ printk("PAX: PCID detected\n");
21452+ set_in_cr4(X86_CR4_PCIDE);
21453+
21454+#ifdef CONFIG_PAX_MEMORY_UDEREF
21455+ pax_open_kernel();
21456+ clone_pgd_mask = ~(pgdval_t)0UL;
21457+ pax_close_kernel();
21458+ if (pax_user_shadow_base)
21459+ printk("PAX: weak UDEREF enabled\n");
21460+ else {
21461+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21462+ printk("PAX: strong UDEREF enabled\n");
21463+ }
21464+#endif
21465+
21466+ if (cpu_has(c, X86_FEATURE_INVPCID))
21467+ printk("PAX: INVPCID detected\n");
21468+}
21469+#endif
21470+
21471 /*
21472 * Some CPU features depend on higher CPUID levels, which may not always
21473 * be available due to CPUID level capping or broken virtualization
21474@@ -395,7 +394,7 @@ void switch_to_new_gdt(int cpu)
21475 {
21476 struct desc_ptr gdt_descr;
21477
21478- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21479+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21480 gdt_descr.size = GDT_SIZE - 1;
21481 load_gdt(&gdt_descr);
21482 /* Reload the per-cpu base */
21483@@ -885,6 +884,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21484 setup_smep(c);
21485 setup_smap(c);
21486
21487+#ifdef CONFIG_X86_64
21488+ setup_pcid(c);
21489+#endif
21490+
21491 /*
21492 * The vendor-specific functions might have changed features.
21493 * Now we do "generic changes."
21494@@ -893,6 +896,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21495 /* Filter out anything that depends on CPUID levels we don't have */
21496 filter_cpuid_features(c, true);
21497
21498+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21499+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21500+#endif
21501+
21502 /* If the model name is still unset, do table lookup. */
21503 if (!c->x86_model_id[0]) {
21504 const char *p;
21505@@ -973,7 +980,7 @@ static void syscall32_cpu_init(void)
21506 void enable_sep_cpu(void)
21507 {
21508 int cpu = get_cpu();
21509- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21510+ struct tss_struct *tss = init_tss + cpu;
21511
21512 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21513 put_cpu();
21514@@ -1113,14 +1120,16 @@ static __init int setup_disablecpuid(char *arg)
21515 }
21516 __setup("clearcpuid=", setup_disablecpuid);
21517
21518+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21519+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21520+
21521 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21522- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21523+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21524 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21525
21526 #ifdef CONFIG_X86_64
21527-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21528-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21529- (unsigned long) debug_idt_table };
21530+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21531+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21532
21533 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21534 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21535@@ -1283,7 +1292,7 @@ void cpu_init(void)
21536 load_ucode_ap();
21537
21538 cpu = stack_smp_processor_id();
21539- t = &per_cpu(init_tss, cpu);
21540+ t = init_tss + cpu;
21541 oist = &per_cpu(orig_ist, cpu);
21542
21543 #ifdef CONFIG_NUMA
21544@@ -1318,7 +1327,6 @@ void cpu_init(void)
21545 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21546 barrier();
21547
21548- x86_configure_nx();
21549 enable_x2apic();
21550
21551 /*
21552@@ -1370,7 +1378,7 @@ void cpu_init(void)
21553 {
21554 int cpu = smp_processor_id();
21555 struct task_struct *curr = current;
21556- struct tss_struct *t = &per_cpu(init_tss, cpu);
21557+ struct tss_struct *t = init_tss + cpu;
21558 struct thread_struct *thread = &curr->thread;
21559
21560 show_ucode_info_early();
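
A note on the common.c hunks above: the new "nopcid" switch and setup_pcid() pass let PaX use PCID-tagged address spaces for strong UDEREF, falling back to the slower shadow-base scheme when the feature is absent or disabled. A minimal sketch of the early-parameter pattern setup_disable_pcid() relies on, with a placeholder parameter name and feature bit (not part of this patch):

#include <linux/init.h>
#include <asm/cpufeature.h>

static __init int setup_disable_foo(char *arg)
{
        /* clear the bit in the boot capability mask, so every CPU
         * brought up later inherits the masked feature */
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        return 1;       /* 1: parameter handled */
}
__setup("nofoo", setup_disable_foo);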
21561diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21562index 9c8f739..902a9c5 100644
21563--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21564+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21565@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21566 };
21567
21568 #ifdef CONFIG_AMD_NB
21569+static struct attribute *default_attrs_amd_nb[] = {
21570+ &type.attr,
21571+ &level.attr,
21572+ &coherency_line_size.attr,
21573+ &physical_line_partition.attr,
21574+ &ways_of_associativity.attr,
21575+ &number_of_sets.attr,
21576+ &size.attr,
21577+ &shared_cpu_map.attr,
21578+ &shared_cpu_list.attr,
21579+ NULL,
21580+ NULL,
21581+ NULL,
21582+ NULL
21583+};
21584+
21585 static struct attribute **amd_l3_attrs(void)
21586 {
21587 static struct attribute **attrs;
21588@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21589
21590 n = ARRAY_SIZE(default_attrs);
21591
21592- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21593- n += 2;
21594-
21595- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21596- n += 1;
21597-
21598- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21599- if (attrs == NULL)
21600- return attrs = default_attrs;
21601-
21602- for (n = 0; default_attrs[n]; n++)
21603- attrs[n] = default_attrs[n];
21604+ attrs = default_attrs_amd_nb;
21605
21606 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21607 attrs[n++] = &cache_disable_0.attr;
21608@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21609 .default_attrs = default_attrs,
21610 };
21611
21612+#ifdef CONFIG_AMD_NB
21613+static struct kobj_type ktype_cache_amd_nb = {
21614+ .sysfs_ops = &sysfs_ops,
21615+ .default_attrs = default_attrs_amd_nb,
21616+};
21617+#endif
21618+
21619 static struct kobj_type ktype_percpu_entry = {
21620 .sysfs_ops = &sysfs_ops,
21621 };
21622@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21623 return retval;
21624 }
21625
21626+#ifdef CONFIG_AMD_NB
21627+ amd_l3_attrs();
21628+#endif
21629+
21630 for (i = 0; i < num_cache_leaves; i++) {
21631+ struct kobj_type *ktype;
21632+
21633 this_object = INDEX_KOBJECT_PTR(cpu, i);
21634 this_object->cpu = cpu;
21635 this_object->index = i;
21636
21637 this_leaf = CPUID4_INFO_IDX(cpu, i);
21638
21639- ktype_cache.default_attrs = default_attrs;
21640+ ktype = &ktype_cache;
21641 #ifdef CONFIG_AMD_NB
21642 if (this_leaf->base.nb)
21643- ktype_cache.default_attrs = amd_l3_attrs();
21644+ ktype = &ktype_cache_amd_nb;
21645 #endif
21646 retval = kobject_init_and_add(&(this_object->kobj),
21647- &ktype_cache,
21648+ ktype,
21649 per_cpu(ici_cache_kobject, cpu),
21650 "index%1lu", i);
21651 if (unlikely(retval)) {
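
The intel_cacheinfo.c rework above trades a kzalloc()'d attribute array for a static one whose trailing NULL slots reserve room for the optional AMD L3 attributes; that removes a runtime allocation and lets the kobj_type pair stay compile-time data. A toy illustration of the reserved-slot idea (invented names, not kernel API):

static const char *attrs[8] = {         /* 4 fixed + up to 3 optional + NULL */
        "type", "level", "size", "shared_cpu_map",
};

static void append_optional(int has_index_disable, int has_partitioning)
{
        int n = 4;                              /* first free slot */

        if (has_index_disable) {
                attrs[n++] = "cache_disable_0";
                attrs[n++] = "cache_disable_1";
        }
        if (has_partitioning)
                attrs[n++] = "subcaches";
        /* untouched slots stay NULL, so the array remains NULL-terminated */
}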
21652diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21653index 9a79c8d..158c2f1 100644
21654--- a/arch/x86/kernel/cpu/mcheck/mce.c
21655+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21656@@ -45,6 +45,7 @@
21657 #include <asm/processor.h>
21658 #include <asm/mce.h>
21659 #include <asm/msr.h>
21660+#include <asm/local.h>
21661
21662 #include "mce-internal.h"
21663
21664@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21665 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21666 m->cs, m->ip);
21667
21668- if (m->cs == __KERNEL_CS)
21669+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21670 print_symbol("{%s}", m->ip);
21671 pr_cont("\n");
21672 }
21673@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21674
21675 #define PANIC_TIMEOUT 5 /* 5 seconds */
21676
21677-static atomic_t mce_paniced;
21678+static atomic_unchecked_t mce_paniced;
21679
21680 static int fake_panic;
21681-static atomic_t mce_fake_paniced;
21682+static atomic_unchecked_t mce_fake_paniced;
21683
21684 /* Panic in progress. Enable interrupts and wait for final IPI */
21685 static void wait_for_panic(void)
21686@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21687 /*
21688 * Make sure only one CPU runs in machine check panic
21689 */
21690- if (atomic_inc_return(&mce_paniced) > 1)
21691+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
21692 wait_for_panic();
21693 barrier();
21694
21695@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21696 console_verbose();
21697 } else {
21698 /* Don't log too much for fake panic */
21699- if (atomic_inc_return(&mce_fake_paniced) > 1)
21700+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
21701 return;
21702 }
21703 /* First print corrected ones that are still unlogged */
21704@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21705 if (!fake_panic) {
21706 if (panic_timeout == 0)
21707 panic_timeout = mca_cfg.panic_timeout;
21708- panic(msg);
21709+ panic("%s", msg);
21710 } else
21711 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21712 }
21713@@ -697,7 +698,7 @@ static int mce_timed_out(u64 *t)
21714 * might have been modified by someone else.
21715 */
21716 rmb();
21717- if (atomic_read(&mce_paniced))
21718+ if (atomic_read_unchecked(&mce_paniced))
21719 wait_for_panic();
21720 if (!mca_cfg.monarch_timeout)
21721 goto out;
21722@@ -1674,7 +1675,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21723 }
21724
21725 /* Call the installed machine check handler for this CPU setup. */
21726-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21727+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21728 unexpected_machine_check;
21729
21730 /*
21731@@ -1697,7 +1698,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21732 return;
21733 }
21734
21735+ pax_open_kernel();
21736 machine_check_vector = do_machine_check;
21737+ pax_close_kernel();
21738
21739 __mcheck_cpu_init_generic();
21740 __mcheck_cpu_init_vendor(c);
21741@@ -1711,7 +1714,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21742 */
21743
21744 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21745-static int mce_chrdev_open_count; /* #times opened */
21746+static local_t mce_chrdev_open_count; /* #times opened */
21747 static int mce_chrdev_open_exclu; /* already open exclusive? */
21748
21749 static int mce_chrdev_open(struct inode *inode, struct file *file)
21750@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21751 spin_lock(&mce_chrdev_state_lock);
21752
21753 if (mce_chrdev_open_exclu ||
21754- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21755+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21756 spin_unlock(&mce_chrdev_state_lock);
21757
21758 return -EBUSY;
21759@@ -1727,7 +1730,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21760
21761 if (file->f_flags & O_EXCL)
21762 mce_chrdev_open_exclu = 1;
21763- mce_chrdev_open_count++;
21764+ local_inc(&mce_chrdev_open_count);
21765
21766 spin_unlock(&mce_chrdev_state_lock);
21767
21768@@ -1738,7 +1741,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21769 {
21770 spin_lock(&mce_chrdev_state_lock);
21771
21772- mce_chrdev_open_count--;
21773+ local_dec(&mce_chrdev_open_count);
21774 mce_chrdev_open_exclu = 0;
21775
21776 spin_unlock(&mce_chrdev_state_lock);
21777@@ -2414,7 +2417,7 @@ static __init void mce_init_banks(void)
21778
21779 for (i = 0; i < mca_cfg.banks; i++) {
21780 struct mce_bank *b = &mce_banks[i];
21781- struct device_attribute *a = &b->attr;
21782+ device_attribute_no_const *a = &b->attr;
21783
21784 sysfs_attr_init(&a->attr);
21785 a->attr.name = b->attrname;
21786@@ -2521,7 +2524,7 @@ struct dentry *mce_get_debugfs_dir(void)
21787 static void mce_reset(void)
21788 {
21789 cpu_missing = 0;
21790- atomic_set(&mce_fake_paniced, 0);
21791+ atomic_set_unchecked(&mce_fake_paniced, 0);
21792 atomic_set(&mce_executing, 0);
21793 atomic_set(&mce_callin, 0);
21794 atomic_set(&global_nwo, 0);
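
Two patterns recur in the mce.c hunk above: atomic_unchecked_t is PaX's annotation for counters that are allowed to wrap (exempting them from overflow checking), and the chardev open count moves from a plain int to a local_t. A small sketch of the local_t counter API as used here (illustrative names; local_t comes from asm/local.h):

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static void on_open(void)
{
        local_inc(&open_count);         /* atomic on the local CPU */
}

static void on_release(void)
{
        local_dec(&open_count);
}

static int is_open(void)
{
        return local_read(&open_count) != 0;
}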
21795diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21796index a304298..49b6d06 100644
21797--- a/arch/x86/kernel/cpu/mcheck/p5.c
21798+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21799@@ -10,6 +10,7 @@
21800 #include <asm/processor.h>
21801 #include <asm/mce.h>
21802 #include <asm/msr.h>
21803+#include <asm/pgtable.h>
21804
21805 /* By default disabled */
21806 int mce_p5_enabled __read_mostly;
21807@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21808 if (!cpu_has(c, X86_FEATURE_MCE))
21809 return;
21810
21811+ pax_open_kernel();
21812 machine_check_vector = pentium_machine_check;
21813+ pax_close_kernel();
21814 /* Make sure the vector pointer is visible before we enable MCEs: */
21815 wmb();
21816
21817diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21818index 7dc5564..1273569 100644
21819--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21820+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21821@@ -9,6 +9,7 @@
21822 #include <asm/processor.h>
21823 #include <asm/mce.h>
21824 #include <asm/msr.h>
21825+#include <asm/pgtable.h>
21826
21827 /* Machine check handler for WinChip C6: */
21828 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21829@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21830 {
21831 u32 lo, hi;
21832
21833+ pax_open_kernel();
21834 machine_check_vector = winchip_machine_check;
21835+ pax_close_kernel();
21836 /* Make sure the vector pointer is visible before we enable MCEs: */
21837 wmb();
21838
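
p5.c and winchip.c above repeat the write-rarely idiom that mce.c introduced for machine_check_vector: the pointer lives in a read-only section, and the one legitimate writer briefly lifts write protection around the store. A sketch of the pattern, assuming the PaX-provided pax_open_kernel()/pax_close_kernel() primitives and __read_only attribute (both defined elsewhere in this patch):

static void default_handler(void)
{
}

/* conceptually write-once: placed in a read-only section */
static void (*check_vector)(void) __read_only = default_handler;

static void install_handler(void (*fn)(void))
{
        pax_open_kernel();      /* briefly allow kernel writes */
        check_vector = fn;
        pax_close_kernel();     /* restore read-only protection */
}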
21839diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21840index dd9d619..86e1d81 100644
21841--- a/arch/x86/kernel/cpu/microcode/core.c
21842+++ b/arch/x86/kernel/cpu/microcode/core.c
21843@@ -516,7 +516,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21844 return NOTIFY_OK;
21845 }
21846
21847-static struct notifier_block __refdata mc_cpu_notifier = {
21848+static struct notifier_block mc_cpu_notifier = {
21849 .notifier_call = mc_cpu_callback,
21850 };
21851
21852diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21853index a276fa7..e66810f 100644
21854--- a/arch/x86/kernel/cpu/microcode/intel.c
21855+++ b/arch/x86/kernel/cpu/microcode/intel.c
21856@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21857
21858 static int get_ucode_user(void *to, const void *from, size_t n)
21859 {
21860- return copy_from_user(to, from, n);
21861+ return copy_from_user(to, (const void __force_user *)from, n);
21862 }
21863
21864 static enum ucode_state
21865 request_microcode_user(int cpu, const void __user *buf, size_t size)
21866 {
21867- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21868+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21869 }
21870
21871 static void microcode_fini_cpu(int cpu)
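
The microcode/intel.c hunk above is about address-space annotations: generic_load_microcode() funnels both kernel and user buffers through one callback, so the user path casts explicitly instead of silently mixing pointer spaces. In vanilla sparse terms the same opt-in looks like this (hypothetical helper; the patch's __force_user/__force_kernel are PaX shorthands for such casts):

#include <linux/uaccess.h>

static int fetch(void *dst, const void *src, size_t n)
{
        /* src really is user memory on this path; say so explicitly,
         * so the checker flags every *other* unannotated crossing */
        return copy_from_user(dst, (const void __force __user *)src, n);
}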
21872diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21873index f961de9..8a9d332 100644
21874--- a/arch/x86/kernel/cpu/mtrr/main.c
21875+++ b/arch/x86/kernel/cpu/mtrr/main.c
21876@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21877 u64 size_or_mask, size_and_mask;
21878 static bool mtrr_aps_delayed_init;
21879
21880-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21881+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21882
21883 const struct mtrr_ops *mtrr_if;
21884
21885diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21886index df5e41f..816c719 100644
21887--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21888+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21889@@ -25,7 +25,7 @@ struct mtrr_ops {
21890 int (*validate_add_page)(unsigned long base, unsigned long size,
21891 unsigned int type);
21892 int (*have_wrcomb)(void);
21893-};
21894+} __do_const;
21895
21896 extern int generic_get_free_region(unsigned long base, unsigned long size,
21897 int replace_reg);
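
The mtrr hunks above constify the ops machinery: __do_const (a PaX attribute) forces instances of the struct into read-only data, and the mtrr_ops pointer table gets __read_only, so neither can be retargeted at runtime. The vanilla-C core of the idea, with invented names:

struct example_ops {
        int (*probe)(void);
        void (*remove)(void);
};

static int ex_probe(void)
{
        return 0;
}

static void ex_remove(void)
{
}

/* const: the function-pointer table lands in .rodata */
static const struct example_ops ex_ops = {
        .probe  = ex_probe,
        .remove = ex_remove,
};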
21898diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21899index 2879ecd..bb8c80b 100644
21900--- a/arch/x86/kernel/cpu/perf_event.c
21901+++ b/arch/x86/kernel/cpu/perf_event.c
21902@@ -1372,7 +1372,7 @@ static void __init pmu_check_apic(void)
21903
21904 }
21905
21906-static struct attribute_group x86_pmu_format_group = {
21907+static attribute_group_no_const x86_pmu_format_group = {
21908 .name = "format",
21909 .attrs = NULL,
21910 };
21911@@ -1471,7 +1471,7 @@ static struct attribute *events_attr[] = {
21912 NULL,
21913 };
21914
21915-static struct attribute_group x86_pmu_events_group = {
21916+static attribute_group_no_const x86_pmu_events_group = {
21917 .name = "events",
21918 .attrs = events_attr,
21919 };
21920@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment)
21921 if (idx > GDT_ENTRIES)
21922 return 0;
21923
21924- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
21925+ desc = get_cpu_gdt_table(smp_processor_id());
21926 }
21927
21928 return get_desc_base(desc + idx);
21929@@ -2085,7 +2085,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21930 break;
21931
21932 perf_callchain_store(entry, frame.return_address);
21933- fp = frame.next_frame;
21934+ fp = (const void __force_user *)frame.next_frame;
21935 }
21936 }
21937
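
The last perf_event.c hunk above touches the user-callchain walker, which follows saved frame pointers through user memory; the cast marks frame.next_frame as a user pointer. A simplified sketch of such a walk (the real code uses an NMI-safe copy variant and the kernel's struct stack_frame; names here are illustrative):

#include <linux/uaccess.h>

struct uframe {
        const void __user *next_frame;
        unsigned long return_address;
};

static void walk_user_stack(const void __user *fp)
{
        struct uframe frame;
        int depth = 0;

        while (fp && depth++ < 127) {           /* bound the walk */
                if (copy_from_user(&frame, fp, sizeof(frame)))
                        break;                  /* faulted: stack ends here */
                /* record frame.return_address in the callchain ... */
                fp = frame.next_frame;          /* follow the saved %rbp */
        }
}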
21938diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21939index 639d128..e92d7e5 100644
21940--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21941+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21942@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21943 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21944 {
21945 struct attribute **attrs;
21946- struct attribute_group *attr_group;
21947+ attribute_group_no_const *attr_group;
21948 int i = 0, j;
21949
21950 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21951diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21952index 2502d0d..e5cc05c 100644
21953--- a/arch/x86/kernel/cpu/perf_event_intel.c
21954+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21955@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21956 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21957
21958 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21959- u64 capabilities;
21960+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21961
21962- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21963- x86_pmu.intel_cap.capabilities = capabilities;
21964+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21965+ x86_pmu.intel_cap.capabilities = capabilities;
21966 }
21967
21968 intel_ds_init();
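
The perf_event_intel.c change above swaps a raw rdmsrl() for rdmsrl_safe(), which returns nonzero instead of oopsing when the MSR read faults (e.g. under a hypervisor that hides MSR_IA32_PERF_CAPABILITIES), so a sane default survives. The pattern in isolation:

#include <linux/types.h>
#include <asm/msr.h>

static u64 read_caps_or_default(void)
{
        u64 caps = 0;                   /* default if the MSR is absent */

        if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &caps))
                caps = 0;               /* read faulted: keep the default */
        return caps;
}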
21969diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21970index 619f769..d510008 100644
21971--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21972+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21973@@ -449,7 +449,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21974 NULL,
21975 };
21976
21977-static struct attribute_group rapl_pmu_events_group = {
21978+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21979 .name = "events",
21980 .attrs = NULL, /* patched at runtime */
21981 };
21982diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21983index ae6552a..b5be2d3 100644
21984--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21985+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21986@@ -3694,7 +3694,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21987 static int __init uncore_type_init(struct intel_uncore_type *type)
21988 {
21989 struct intel_uncore_pmu *pmus;
21990- struct attribute_group *attr_group;
21991+ attribute_group_no_const *attr_group;
21992 struct attribute **attrs;
21993 int i, j;
21994
21995diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21996index 90236f0..54cb20d 100644
21997--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21998+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21999@@ -503,7 +503,7 @@ struct intel_uncore_box {
22000 struct uncore_event_desc {
22001 struct kobj_attribute attr;
22002 const char *config;
22003-};
22004+} __do_const;
22005
22006 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
22007 { \
22008diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
22009index 3225ae6c..ee3c6db 100644
22010--- a/arch/x86/kernel/cpuid.c
22011+++ b/arch/x86/kernel/cpuid.c
22012@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
22013 return notifier_from_errno(err);
22014 }
22015
22016-static struct notifier_block __refdata cpuid_class_cpu_notifier =
22017+static struct notifier_block cpuid_class_cpu_notifier =
22018 {
22019 .notifier_call = cpuid_class_cpu_callback,
22020 };
22021diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
22022index 507de80..ebaae2a 100644
22023--- a/arch/x86/kernel/crash.c
22024+++ b/arch/x86/kernel/crash.c
22025@@ -58,7 +58,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22026 #ifdef CONFIG_X86_32
22027 struct pt_regs fixed_regs;
22028
22029- if (!user_mode_vm(regs)) {
22030+ if (!user_mode(regs)) {
22031 crash_fixup_ss_esp(&fixed_regs, regs);
22032 regs = &fixed_regs;
22033 }
22034diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22035index afa64ad..dce67dd 100644
22036--- a/arch/x86/kernel/crash_dump_64.c
22037+++ b/arch/x86/kernel/crash_dump_64.c
22038@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22039 return -ENOMEM;
22040
22041 if (userbuf) {
22042- if (copy_to_user(buf, vaddr + offset, csize)) {
22043+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22044 iounmap(vaddr);
22045 return -EFAULT;
22046 }
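
copy_oldmem_page() above is a dual-destination copy: the same buf pointer is user memory when userbuf is set and kernel memory otherwise, so the user branch needs an explicit cast into the __user space. A hypothetical helper showing the shape of that branch:

#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/errno.h>

static int copy_out(char *buf, const void *src, size_t n, int userbuf)
{
        if (userbuf)
                return copy_to_user((char __force __user *)buf, src, n) ?
                       -EFAULT : 0;
        memcpy(buf, src, n);
        return 0;
}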
22047diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22048index f6dfd93..892ade4 100644
22049--- a/arch/x86/kernel/doublefault.c
22050+++ b/arch/x86/kernel/doublefault.c
22051@@ -12,7 +12,7 @@
22052
22053 #define DOUBLEFAULT_STACKSIZE (1024)
22054 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22055-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22056+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22057
22058 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22059
22060@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22061 unsigned long gdt, tss;
22062
22063 native_store_gdt(&gdt_desc);
22064- gdt = gdt_desc.address;
22065+ gdt = (unsigned long)gdt_desc.address;
22066
22067 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22068
22069@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22070 /* 0x2 bit is always set */
22071 .flags = X86_EFLAGS_SF | 0x2,
22072 .sp = STACK_START,
22073- .es = __USER_DS,
22074+ .es = __KERNEL_DS,
22075 .cs = __KERNEL_CS,
22076 .ss = __KERNEL_DS,
22077- .ds = __USER_DS,
22078+ .ds = __KERNEL_DS,
22079 .fs = __KERNEL_PERCPU,
22080
22081 .__cr3 = __pa_nodebug(swapper_pg_dir),
22082diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22083index b74ebc7..6dbb0c5 100644
22084--- a/arch/x86/kernel/dumpstack.c
22085+++ b/arch/x86/kernel/dumpstack.c
22086@@ -2,6 +2,9 @@
22087 * Copyright (C) 1991, 1992 Linus Torvalds
22088 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22089 */
22090+#ifdef CONFIG_GRKERNSEC_HIDESYM
22091+#define __INCLUDED_BY_HIDESYM 1
22092+#endif
22093 #include <linux/kallsyms.h>
22094 #include <linux/kprobes.h>
22095 #include <linux/uaccess.h>
22096@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
22097 static void
22098 print_ftrace_graph_addr(unsigned long addr, void *data,
22099 const struct stacktrace_ops *ops,
22100- struct thread_info *tinfo, int *graph)
22101+ struct task_struct *task, int *graph)
22102 {
22103- struct task_struct *task;
22104 unsigned long ret_addr;
22105 int index;
22106
22107 if (addr != (unsigned long)return_to_handler)
22108 return;
22109
22110- task = tinfo->task;
22111 index = task->curr_ret_stack;
22112
22113 if (!task->ret_stack || index < *graph)
22114@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22115 static inline void
22116 print_ftrace_graph_addr(unsigned long addr, void *data,
22117 const struct stacktrace_ops *ops,
22118- struct thread_info *tinfo, int *graph)
22119+ struct task_struct *task, int *graph)
22120 { }
22121 #endif
22122
22123@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22124 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22125 */
22126
22127-static inline int valid_stack_ptr(struct thread_info *tinfo,
22128- void *p, unsigned int size, void *end)
22129+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22130 {
22131- void *t = tinfo;
22132 if (end) {
22133 if (p < end && p >= (end-THREAD_SIZE))
22134 return 1;
22135@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22136 }
22137
22138 unsigned long
22139-print_context_stack(struct thread_info *tinfo,
22140+print_context_stack(struct task_struct *task, void *stack_start,
22141 unsigned long *stack, unsigned long bp,
22142 const struct stacktrace_ops *ops, void *data,
22143 unsigned long *end, int *graph)
22144 {
22145 struct stack_frame *frame = (struct stack_frame *)bp;
22146
22147- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22148+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22149 unsigned long addr;
22150
22151 addr = *stack;
22152@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22153 } else {
22154 ops->address(data, addr, 0);
22155 }
22156- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22157+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22158 }
22159 stack++;
22160 }
22161@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22162 EXPORT_SYMBOL_GPL(print_context_stack);
22163
22164 unsigned long
22165-print_context_stack_bp(struct thread_info *tinfo,
22166+print_context_stack_bp(struct task_struct *task, void *stack_start,
22167 unsigned long *stack, unsigned long bp,
22168 const struct stacktrace_ops *ops, void *data,
22169 unsigned long *end, int *graph)
22170@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22171 struct stack_frame *frame = (struct stack_frame *)bp;
22172 unsigned long *ret_addr = &frame->return_address;
22173
22174- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22175+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22176 unsigned long addr = *ret_addr;
22177
22178 if (!__kernel_text_address(addr))
22179@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22180 ops->address(data, addr, 1);
22181 frame = frame->next_frame;
22182 ret_addr = &frame->return_address;
22183- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22184+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22185 }
22186
22187 return (unsigned long)frame;
22188@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22189 static void print_trace_address(void *data, unsigned long addr, int reliable)
22190 {
22191 touch_nmi_watchdog();
22192- printk(data);
22193+ printk("%s", (char *)data);
22194 printk_stack_address(addr, reliable);
22195 }
22196
22197@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22198 EXPORT_SYMBOL_GPL(oops_begin);
22199 NOKPROBE_SYMBOL(oops_begin);
22200
22201+extern void gr_handle_kernel_exploit(void);
22202+
22203 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22204 {
22205 if (regs && kexec_should_crash(current))
22206@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22207 panic("Fatal exception in interrupt");
22208 if (panic_on_oops)
22209 panic("Fatal exception");
22210- do_exit(signr);
22211+
22212+ gr_handle_kernel_exploit();
22213+
22214+ do_group_exit(signr);
22215 }
22216 NOKPROBE_SYMBOL(oops_end);
22217
22218@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22219 print_modules();
22220 show_regs(regs);
22221 #ifdef CONFIG_X86_32
22222- if (user_mode_vm(regs)) {
22223+ if (user_mode(regs)) {
22224 sp = regs->sp;
22225 ss = regs->ss & 0xffff;
22226 } else {
22227@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22228 unsigned long flags = oops_begin();
22229 int sig = SIGSEGV;
22230
22231- if (!user_mode_vm(regs))
22232+ if (!user_mode(regs))
22233 report_bug(regs->ip, regs);
22234
22235 if (__die(str, regs, err))
22236diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22237index 5abd4cd..c65733b 100644
22238--- a/arch/x86/kernel/dumpstack_32.c
22239+++ b/arch/x86/kernel/dumpstack_32.c
22240@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22241 bp = stack_frame(task, regs);
22242
22243 for (;;) {
22244- struct thread_info *context;
22245+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22246 void *end_stack;
22247
22248 end_stack = is_hardirq_stack(stack, cpu);
22249 if (!end_stack)
22250 end_stack = is_softirq_stack(stack, cpu);
22251
22252- context = task_thread_info(task);
22253- bp = ops->walk_stack(context, stack, bp, ops, data,
22254+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22255 end_stack, &graph);
22256
22257 /* Stop if not on irq stack */
22258@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22259 int i;
22260
22261 show_regs_print_info(KERN_EMERG);
22262- __show_regs(regs, !user_mode_vm(regs));
22263+ __show_regs(regs, !user_mode(regs));
22264
22265 /*
22266 * When in-kernel, we also print out the stack and code at the
22267 * time of the fault..
22268 */
22269- if (!user_mode_vm(regs)) {
22270+ if (!user_mode(regs)) {
22271 unsigned int code_prologue = code_bytes * 43 / 64;
22272 unsigned int code_len = code_bytes;
22273 unsigned char c;
22274 u8 *ip;
22275+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22276
22277 pr_emerg("Stack:\n");
22278 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22279
22280 pr_emerg("Code:");
22281
22282- ip = (u8 *)regs->ip - code_prologue;
22283+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22284 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22285 /* try starting at IP */
22286- ip = (u8 *)regs->ip;
22287+ ip = (u8 *)regs->ip + cs_base;
22288 code_len = code_len - code_prologue + 1;
22289 }
22290 for (i = 0; i < code_len; i++, ip++) {
22291@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22292 pr_cont(" Bad EIP value.");
22293 break;
22294 }
22295- if (ip == (u8 *)regs->ip)
22296+ if (ip == (u8 *)regs->ip + cs_base)
22297 pr_cont(" <%02x>", c);
22298 else
22299 pr_cont(" %02x", c);
22300@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22301 {
22302 unsigned short ud2;
22303
22304+ ip = ktla_ktva(ip);
22305 if (ip < PAGE_OFFSET)
22306 return 0;
22307 if (probe_kernel_address((unsigned short *)ip, ud2))
22308@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22309
22310 return ud2 == 0x0b0f;
22311 }
22312+
22313+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22314+void pax_check_alloca(unsigned long size)
22315+{
22316+ unsigned long sp = (unsigned long)&sp, stack_left;
22317+
22318+ /* all kernel stacks are of the same size */
22319+ stack_left = sp & (THREAD_SIZE - 1);
22320+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22321+}
22322+EXPORT_SYMBOL(pax_check_alloca);
22323+#endif
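
The mask arithmetic in pax_check_alloca() above works because every 32-bit kernel stack is THREAD_SIZE-aligned: sp & (THREAD_SIZE - 1) is sp's offset from the bottom of its stack, and since stacks grow downward that offset is exactly the bytes still free. A runnable userspace demonstration of the arithmetic (addresses invented):

#include <stdio.h>

#define THREAD_SIZE 8192UL              /* 32-bit x86: two pages */

int main(void)
{
        unsigned long base = 0xc1000000UL;      /* THREAD_SIZE-aligned */
        unsigned long sp   = base + 0x1c40;     /* some stack pointer  */
        unsigned long left = sp & (THREAD_SIZE - 1);

        printf("stack_left = %#lx (%lu bytes)\n", left, left);
        /* the BUG_ON then demands left >= 256 and size < left - 256 */
        return 0;
}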
22324diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22325index 1abcb50..6c8d702 100644
22326--- a/arch/x86/kernel/dumpstack_64.c
22327+++ b/arch/x86/kernel/dumpstack_64.c
22328@@ -154,12 +154,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22329 const struct stacktrace_ops *ops, void *data)
22330 {
22331 const unsigned cpu = get_cpu();
22332- struct thread_info *tinfo;
22333 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22334 unsigned long dummy;
22335 unsigned used = 0;
22336 int graph = 0;
22337 int done = 0;
22338+ void *stack_start;
22339
22340 if (!task)
22341 task = current;
22342@@ -180,7 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22343 * current stack address. If the stacks consist of nested
22344 * exceptions
22345 */
22346- tinfo = task_thread_info(task);
22347 while (!done) {
22348 unsigned long *stack_end;
22349 enum stack_type stype;
22350@@ -203,7 +202,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22351 if (ops->stack(data, id) < 0)
22352 break;
22353
22354- bp = ops->walk_stack(tinfo, stack, bp, ops,
22355+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22356 data, stack_end, &graph);
22357 ops->stack(data, "<EOE>");
22358 /*
22359@@ -211,6 +210,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22360 * second-to-last pointer (index -2 to end) in the
22361 * exception stack:
22362 */
22363+ if ((u16)stack_end[-1] != __KERNEL_DS)
22364+ goto out;
22365 stack = (unsigned long *) stack_end[-2];
22366 done = 0;
22367 break;
22368@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22369
22370 if (ops->stack(data, "IRQ") < 0)
22371 break;
22372- bp = ops->walk_stack(tinfo, stack, bp,
22373+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22374 ops, data, stack_end, &graph);
22375 /*
22376 * We link to the next stack (which would be
22377@@ -241,7 +242,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22378 /*
22379 * This handles the process stack:
22380 */
22381- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22382+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22383+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22384+out:
22385 put_cpu();
22386 }
22387 EXPORT_SYMBOL(dump_trace);
22388@@ -350,3 +353,50 @@ int is_valid_bugaddr(unsigned long ip)
22389
22390 return ud2 == 0x0b0f;
22391 }
22392+
22393+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22394+void pax_check_alloca(unsigned long size)
22395+{
22396+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22397+ unsigned cpu, used;
22398+ char *id;
22399+
22400+ /* check the process stack first */
22401+ stack_start = (unsigned long)task_stack_page(current);
22402+ stack_end = stack_start + THREAD_SIZE;
22403+ if (likely(stack_start <= sp && sp < stack_end)) {
22404+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22405+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22406+ return;
22407+ }
22408+
22409+ cpu = get_cpu();
22410+
22411+ /* check the irq stacks */
22412+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22413+ stack_start = stack_end - IRQ_STACK_SIZE;
22414+ if (stack_start <= sp && sp < stack_end) {
22415+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22416+ put_cpu();
22417+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22418+ return;
22419+ }
22420+
22421+ /* check the exception stacks */
22422+ used = 0;
22423+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22424+ stack_start = stack_end - EXCEPTION_STKSZ;
22425+ if (stack_end && stack_start <= sp && sp < stack_end) {
22426+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22427+ put_cpu();
22428+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22429+ return;
22430+ }
22431+
22432+ put_cpu();
22433+
22434+ /* unknown stack */
22435+ BUG();
22436+}
22437+EXPORT_SYMBOL(pax_check_alloca);
22438+#endif
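
Unlike the 32-bit version, the 64-bit pax_check_alloca() above must first classify sp against several candidate stacks (process, per-CPU IRQ, exception) before applying the same headroom test. A stripped-down sketch of that classification step, assuming each stack's base is aligned to its power-of-two size (which holds for the stacks checked here):

struct stack_range {
        unsigned long start;
        unsigned long size;     /* power of two, start is size-aligned */
};

static long bytes_left_on(unsigned long sp, const struct stack_range *r)
{
        if (sp < r->start || sp >= r->start + r->size)
                return -1;                      /* sp is not on this stack */
        return sp & (r->size - 1);              /* same mask trick as 32-bit */
}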
22439diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22440index 988c00a..4f673b6 100644
22441--- a/arch/x86/kernel/e820.c
22442+++ b/arch/x86/kernel/e820.c
22443@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22444
22445 static void early_panic(char *msg)
22446 {
22447- early_printk(msg);
22448- panic(msg);
22449+ early_printk("%s", msg);
22450+ panic("%s", msg);
22451 }
22452
22453 static int userdef __initdata;
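
The e820.c hunk above is a classic format-string hardening fix: msg can carry firmware-derived text, so it must be passed as data, never as the format itself. A runnable userspace illustration of the difference:

#include <stdio.h>

static void report(const char *msg)
{
        /* BAD:  printf(msg);  -- any %-directives in msg get interpreted */
        printf("%s\n", msg);    /* GOOD: msg is an argument, not a format */
}

int main(void)
{
        report("value is 100%s complete");      /* printed literally */
        return 0;
}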
22454diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22455index 01d1c18..8073693 100644
22456--- a/arch/x86/kernel/early_printk.c
22457+++ b/arch/x86/kernel/early_printk.c
22458@@ -7,6 +7,7 @@
22459 #include <linux/pci_regs.h>
22460 #include <linux/pci_ids.h>
22461 #include <linux/errno.h>
22462+#include <linux/sched.h>
22463 #include <asm/io.h>
22464 #include <asm/processor.h>
22465 #include <asm/fcntl.h>
22466diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22467index 0d0c9d4..f65b4f6 100644
22468--- a/arch/x86/kernel/entry_32.S
22469+++ b/arch/x86/kernel/entry_32.S
22470@@ -177,13 +177,153 @@
22471 /*CFI_REL_OFFSET gs, PT_GS*/
22472 .endm
22473 .macro SET_KERNEL_GS reg
22474+
22475+#ifdef CONFIG_CC_STACKPROTECTOR
22476 movl $(__KERNEL_STACK_CANARY), \reg
22477+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22478+ movl $(__USER_DS), \reg
22479+#else
22480+ xorl \reg, \reg
22481+#endif
22482+
22483 movl \reg, %gs
22484 .endm
22485
22486 #endif /* CONFIG_X86_32_LAZY_GS */
22487
22488-.macro SAVE_ALL
22489+.macro pax_enter_kernel
22490+#ifdef CONFIG_PAX_KERNEXEC
22491+ call pax_enter_kernel
22492+#endif
22493+.endm
22494+
22495+.macro pax_exit_kernel
22496+#ifdef CONFIG_PAX_KERNEXEC
22497+ call pax_exit_kernel
22498+#endif
22499+.endm
22500+
22501+#ifdef CONFIG_PAX_KERNEXEC
22502+ENTRY(pax_enter_kernel)
22503+#ifdef CONFIG_PARAVIRT
22504+ pushl %eax
22505+ pushl %ecx
22506+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22507+ mov %eax, %esi
22508+#else
22509+ mov %cr0, %esi
22510+#endif
22511+ bts $16, %esi
22512+ jnc 1f
22513+ mov %cs, %esi
22514+ cmp $__KERNEL_CS, %esi
22515+ jz 3f
22516+ ljmp $__KERNEL_CS, $3f
22517+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22518+2:
22519+#ifdef CONFIG_PARAVIRT
22520+ mov %esi, %eax
22521+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22522+#else
22523+ mov %esi, %cr0
22524+#endif
22525+3:
22526+#ifdef CONFIG_PARAVIRT
22527+ popl %ecx
22528+ popl %eax
22529+#endif
22530+ ret
22531+ENDPROC(pax_enter_kernel)
22532+
22533+ENTRY(pax_exit_kernel)
22534+#ifdef CONFIG_PARAVIRT
22535+ pushl %eax
22536+ pushl %ecx
22537+#endif
22538+ mov %cs, %esi
22539+ cmp $__KERNEXEC_KERNEL_CS, %esi
22540+ jnz 2f
22541+#ifdef CONFIG_PARAVIRT
22542+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22543+ mov %eax, %esi
22544+#else
22545+ mov %cr0, %esi
22546+#endif
22547+ btr $16, %esi
22548+ ljmp $__KERNEL_CS, $1f
22549+1:
22550+#ifdef CONFIG_PARAVIRT
22551+ mov %esi, %eax
22552+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22553+#else
22554+ mov %esi, %cr0
22555+#endif
22556+2:
22557+#ifdef CONFIG_PARAVIRT
22558+ popl %ecx
22559+ popl %eax
22560+#endif
22561+ ret
22562+ENDPROC(pax_exit_kernel)
22563+#endif
22564+
22565+ .macro pax_erase_kstack
22566+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22567+ call pax_erase_kstack
22568+#endif
22569+ .endm
22570+
22571+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22572+/*
22573+ * ebp: thread_info
22574+ */
22575+ENTRY(pax_erase_kstack)
22576+ pushl %edi
22577+ pushl %ecx
22578+ pushl %eax
22579+
22580+ mov TI_lowest_stack(%ebp), %edi
22581+ mov $-0xBEEF, %eax
22582+ std
22583+
22584+1: mov %edi, %ecx
22585+ and $THREAD_SIZE_asm - 1, %ecx
22586+ shr $2, %ecx
22587+ repne scasl
22588+ jecxz 2f
22589+
22590+ cmp $2*16, %ecx
22591+ jc 2f
22592+
22593+ mov $2*16, %ecx
22594+ repe scasl
22595+ jecxz 2f
22596+ jne 1b
22597+
22598+2: cld
22599+ mov %esp, %ecx
22600+ sub %edi, %ecx
22601+
22602+ cmp $THREAD_SIZE_asm, %ecx
22603+ jb 3f
22604+ ud2
22605+3:
22606+
22607+ shr $2, %ecx
22608+ rep stosl
22609+
22610+ mov TI_task_thread_sp0(%ebp), %edi
22611+ sub $128, %edi
22612+ mov %edi, TI_lowest_stack(%ebp)
22613+
22614+ popl %eax
22615+ popl %ecx
22616+ popl %edi
22617+ ret
22618+ENDPROC(pax_erase_kstack)
22619+#endif
22620+
22621+.macro __SAVE_ALL _DS
22622 cld
22623 PUSH_GS
22624 pushl_cfi %fs
22625@@ -206,7 +346,7 @@
22626 CFI_REL_OFFSET ecx, 0
22627 pushl_cfi %ebx
22628 CFI_REL_OFFSET ebx, 0
22629- movl $(__USER_DS), %edx
22630+ movl $\_DS, %edx
22631 movl %edx, %ds
22632 movl %edx, %es
22633 movl $(__KERNEL_PERCPU), %edx
22634@@ -214,6 +354,15 @@
22635 SET_KERNEL_GS %edx
22636 .endm
22637
22638+.macro SAVE_ALL
22639+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22640+ __SAVE_ALL __KERNEL_DS
22641+ pax_enter_kernel
22642+#else
22643+ __SAVE_ALL __USER_DS
22644+#endif
22645+.endm
22646+
22647 .macro RESTORE_INT_REGS
22648 popl_cfi %ebx
22649 CFI_RESTORE ebx
22650@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
22651 popfl_cfi
22652 jmp syscall_exit
22653 CFI_ENDPROC
22654-END(ret_from_fork)
22655+ENDPROC(ret_from_fork)
22656
22657 ENTRY(ret_from_kernel_thread)
22658 CFI_STARTPROC
22659@@ -340,7 +489,15 @@ ret_from_intr:
22660 andl $SEGMENT_RPL_MASK, %eax
22661 #endif
22662 cmpl $USER_RPL, %eax
22663+
22664+#ifdef CONFIG_PAX_KERNEXEC
22665+ jae resume_userspace
22666+
22667+ pax_exit_kernel
22668+ jmp resume_kernel
22669+#else
22670 jb resume_kernel # not returning to v8086 or userspace
22671+#endif
22672
22673 ENTRY(resume_userspace)
22674 LOCKDEP_SYS_EXIT
22675@@ -352,8 +509,8 @@ ENTRY(resume_userspace)
22676 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22677 # int/exception return?
22678 jne work_pending
22679- jmp restore_all
22680-END(ret_from_exception)
22681+ jmp restore_all_pax
22682+ENDPROC(ret_from_exception)
22683
22684 #ifdef CONFIG_PREEMPT
22685 ENTRY(resume_kernel)
22686@@ -365,7 +522,7 @@ need_resched:
22687 jz restore_all
22688 call preempt_schedule_irq
22689 jmp need_resched
22690-END(resume_kernel)
22691+ENDPROC(resume_kernel)
22692 #endif
22693 CFI_ENDPROC
22694
22695@@ -395,30 +552,45 @@ sysenter_past_esp:
22696 /*CFI_REL_OFFSET cs, 0*/
22697 /*
22698 * Push current_thread_info()->sysenter_return to the stack.
22699- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22700- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22701 */
22702- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22703+ pushl_cfi $0
22704 CFI_REL_OFFSET eip, 0
22705
22706 pushl_cfi %eax
22707 SAVE_ALL
22708+ GET_THREAD_INFO(%ebp)
22709+ movl TI_sysenter_return(%ebp),%ebp
22710+ movl %ebp,PT_EIP(%esp)
22711 ENABLE_INTERRUPTS(CLBR_NONE)
22712
22713 /*
22714 * Load the potential sixth argument from user stack.
22715 * Careful about security.
22716 */
22717+ movl PT_OLDESP(%esp),%ebp
22718+
22719+#ifdef CONFIG_PAX_MEMORY_UDEREF
22720+ mov PT_OLDSS(%esp),%ds
22721+1: movl %ds:(%ebp),%ebp
22722+ push %ss
22723+ pop %ds
22724+#else
22725 cmpl $__PAGE_OFFSET-3,%ebp
22726 jae syscall_fault
22727 ASM_STAC
22728 1: movl (%ebp),%ebp
22729 ASM_CLAC
22730+#endif
22731+
22732 movl %ebp,PT_EBP(%esp)
22733 _ASM_EXTABLE(1b,syscall_fault)
22734
22735 GET_THREAD_INFO(%ebp)
22736
22737+#ifdef CONFIG_PAX_RANDKSTACK
22738+ pax_erase_kstack
22739+#endif
22740+
22741 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22742 jnz sysenter_audit
22743 sysenter_do_call:
22744@@ -434,12 +606,24 @@ sysenter_after_call:
22745 testl $_TIF_ALLWORK_MASK, %ecx
22746 jne sysexit_audit
22747 sysenter_exit:
22748+
22749+#ifdef CONFIG_PAX_RANDKSTACK
22750+ pushl_cfi %eax
22751+ movl %esp, %eax
22752+ call pax_randomize_kstack
22753+ popl_cfi %eax
22754+#endif
22755+
22756+ pax_erase_kstack
22757+
22758 /* if something modifies registers it must also disable sysexit */
22759 movl PT_EIP(%esp), %edx
22760 movl PT_OLDESP(%esp), %ecx
22761 xorl %ebp,%ebp
22762 TRACE_IRQS_ON
22763 1: mov PT_FS(%esp), %fs
22764+2: mov PT_DS(%esp), %ds
22765+3: mov PT_ES(%esp), %es
22766 PTGS_TO_GS
22767 ENABLE_INTERRUPTS_SYSEXIT
22768
22769@@ -456,6 +640,9 @@ sysenter_audit:
22770 movl %eax,%edx /* 2nd arg: syscall number */
22771 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
22772 call __audit_syscall_entry
22773+
22774+ pax_erase_kstack
22775+
22776 pushl_cfi %ebx
22777 movl PT_EAX(%esp),%eax /* reload syscall number */
22778 jmp sysenter_do_call
22779@@ -481,10 +668,16 @@ sysexit_audit:
22780
22781 CFI_ENDPROC
22782 .pushsection .fixup,"ax"
22783-2: movl $0,PT_FS(%esp)
22784+4: movl $0,PT_FS(%esp)
22785+ jmp 1b
22786+5: movl $0,PT_DS(%esp)
22787+ jmp 1b
22788+6: movl $0,PT_ES(%esp)
22789 jmp 1b
22790 .popsection
22791- _ASM_EXTABLE(1b,2b)
22792+ _ASM_EXTABLE(1b,4b)
22793+ _ASM_EXTABLE(2b,5b)
22794+ _ASM_EXTABLE(3b,6b)
22795 PTGS_TO_GS_EX
22796 ENDPROC(ia32_sysenter_target)
22797
22798@@ -495,6 +688,11 @@ ENTRY(system_call)
22799 pushl_cfi %eax # save orig_eax
22800 SAVE_ALL
22801 GET_THREAD_INFO(%ebp)
22802+
22803+#ifdef CONFIG_PAX_RANDKSTACK
22804+ pax_erase_kstack
22805+#endif
22806+
22807 # system call tracing in operation / emulation
22808 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22809 jnz syscall_trace_entry
22810@@ -514,6 +712,15 @@ syscall_exit:
22811 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22812 jne syscall_exit_work
22813
22814+restore_all_pax:
22815+
22816+#ifdef CONFIG_PAX_RANDKSTACK
22817+ movl %esp, %eax
22818+ call pax_randomize_kstack
22819+#endif
22820+
22821+ pax_erase_kstack
22822+
22823 restore_all:
22824 TRACE_IRQS_IRET
22825 restore_all_notrace:
22826@@ -568,14 +775,34 @@ ldt_ss:
22827 * compensating for the offset by changing to the ESPFIX segment with
22828 * a base address that matches for the difference.
22829 */
22830-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22831+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22832 mov %esp, %edx /* load kernel esp */
22833 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22834 mov %dx, %ax /* eax: new kernel esp */
22835 sub %eax, %edx /* offset (low word is 0) */
22836+#ifdef CONFIG_SMP
22837+ movl PER_CPU_VAR(cpu_number), %ebx
22838+ shll $PAGE_SHIFT_asm, %ebx
22839+ addl $cpu_gdt_table, %ebx
22840+#else
22841+ movl $cpu_gdt_table, %ebx
22842+#endif
22843 shr $16, %edx
22844- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22845- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22846+
22847+#ifdef CONFIG_PAX_KERNEXEC
22848+ mov %cr0, %esi
22849+ btr $16, %esi
22850+ mov %esi, %cr0
22851+#endif
22852+
22853+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22854+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22855+
22856+#ifdef CONFIG_PAX_KERNEXEC
22857+ bts $16, %esi
22858+ mov %esi, %cr0
22859+#endif
22860+
22861 pushl_cfi $__ESPFIX_SS
22862 pushl_cfi %eax /* new kernel esp */
22863 /* Disable interrupts, but do not irqtrace this section: we
22864@@ -605,20 +832,18 @@ work_resched:
22865 movl TI_flags(%ebp), %ecx
22866 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22867 # than syscall tracing?
22868- jz restore_all
22869+ jz restore_all_pax
22870 testb $_TIF_NEED_RESCHED, %cl
22871 jnz work_resched
22872
22873 work_notifysig: # deal with pending signals and
22874 # notify-resume requests
22875+ movl %esp, %eax
22876 #ifdef CONFIG_VM86
22877 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22878- movl %esp, %eax
22879 jne work_notifysig_v86 # returning to kernel-space or
22880 # vm86-space
22881 1:
22882-#else
22883- movl %esp, %eax
22884 #endif
22885 TRACE_IRQS_ON
22886 ENABLE_INTERRUPTS(CLBR_NONE)
22887@@ -639,7 +864,7 @@ work_notifysig_v86:
22888 movl %eax, %esp
22889 jmp 1b
22890 #endif
22891-END(work_pending)
22892+ENDPROC(work_pending)
22893
22894 # perform syscall exit tracing
22895 ALIGN
22896@@ -647,11 +872,14 @@ syscall_trace_entry:
22897 movl $-ENOSYS,PT_EAX(%esp)
22898 movl %esp, %eax
22899 call syscall_trace_enter
22900+
22901+ pax_erase_kstack
22902+
22903 /* What it returned is what we'll actually use. */
22904 cmpl $(NR_syscalls), %eax
22905 jnae syscall_call
22906 jmp syscall_exit
22907-END(syscall_trace_entry)
22908+ENDPROC(syscall_trace_entry)
22909
22910 # perform syscall exit tracing
22911 ALIGN
22912@@ -664,26 +892,30 @@ syscall_exit_work:
22913 movl %esp, %eax
22914 call syscall_trace_leave
22915 jmp resume_userspace
22916-END(syscall_exit_work)
22917+ENDPROC(syscall_exit_work)
22918 CFI_ENDPROC
22919
22920 RING0_INT_FRAME # can't unwind into user space anyway
22921 syscall_fault:
22922+#ifdef CONFIG_PAX_MEMORY_UDEREF
22923+ push %ss
22924+ pop %ds
22925+#endif
22926 ASM_CLAC
22927 GET_THREAD_INFO(%ebp)
22928 movl $-EFAULT,PT_EAX(%esp)
22929 jmp resume_userspace
22930-END(syscall_fault)
22931+ENDPROC(syscall_fault)
22932
22933 syscall_badsys:
22934 movl $-ENOSYS,%eax
22935 jmp syscall_after_call
22936-END(syscall_badsys)
22937+ENDPROC(syscall_badsys)
22938
22939 sysenter_badsys:
22940 movl $-ENOSYS,%eax
22941 jmp sysenter_after_call
22942-END(syscall_badsys)
22943+ENDPROC(sysenter_badsys)
22944 CFI_ENDPROC
22945
22946 .macro FIXUP_ESPFIX_STACK
22947@@ -696,8 +928,15 @@ END(syscall_badsys)
22948 */
22949 #ifdef CONFIG_X86_ESPFIX32
22950 /* fixup the stack */
22951- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22952- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22953+#ifdef CONFIG_SMP
22954+ movl PER_CPU_VAR(cpu_number), %ebx
22955+ shll $PAGE_SHIFT_asm, %ebx
22956+ addl $cpu_gdt_table, %ebx
22957+#else
22958+ movl $cpu_gdt_table, %ebx
22959+#endif
22960+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22961+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22962 shl $16, %eax
22963 addl %esp, %eax /* the adjusted stack pointer */
22964 pushl_cfi $__KERNEL_DS
22965@@ -753,7 +992,7 @@ vector=vector+1
22966 .endr
22967 2: jmp common_interrupt
22968 .endr
22969-END(irq_entries_start)
22970+ENDPROC(irq_entries_start)
22971
22972 .previous
22973 END(interrupt)
22974@@ -810,7 +1049,7 @@ ENTRY(coprocessor_error)
22975 pushl_cfi $do_coprocessor_error
22976 jmp error_code
22977 CFI_ENDPROC
22978-END(coprocessor_error)
22979+ENDPROC(coprocessor_error)
22980
22981 ENTRY(simd_coprocessor_error)
22982 RING0_INT_FRAME
22983@@ -823,7 +1062,7 @@ ENTRY(simd_coprocessor_error)
22984 .section .altinstructions,"a"
22985 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22986 .previous
22987-.section .altinstr_replacement,"ax"
22988+.section .altinstr_replacement,"a"
22989 663: pushl $do_simd_coprocessor_error
22990 664:
22991 .previous
22992@@ -832,7 +1071,7 @@ ENTRY(simd_coprocessor_error)
22993 #endif
22994 jmp error_code
22995 CFI_ENDPROC
22996-END(simd_coprocessor_error)
22997+ENDPROC(simd_coprocessor_error)
22998
22999 ENTRY(device_not_available)
23000 RING0_INT_FRAME
23001@@ -841,18 +1080,18 @@ ENTRY(device_not_available)
23002 pushl_cfi $do_device_not_available
23003 jmp error_code
23004 CFI_ENDPROC
23005-END(device_not_available)
23006+ENDPROC(device_not_available)
23007
23008 #ifdef CONFIG_PARAVIRT
23009 ENTRY(native_iret)
23010 iret
23011 _ASM_EXTABLE(native_iret, iret_exc)
23012-END(native_iret)
23013+ENDPROC(native_iret)
23014
23015 ENTRY(native_irq_enable_sysexit)
23016 sti
23017 sysexit
23018-END(native_irq_enable_sysexit)
23019+ENDPROC(native_irq_enable_sysexit)
23020 #endif
23021
23022 ENTRY(overflow)
23023@@ -862,7 +1101,7 @@ ENTRY(overflow)
23024 pushl_cfi $do_overflow
23025 jmp error_code
23026 CFI_ENDPROC
23027-END(overflow)
23028+ENDPROC(overflow)
23029
23030 ENTRY(bounds)
23031 RING0_INT_FRAME
23032@@ -871,7 +1110,7 @@ ENTRY(bounds)
23033 pushl_cfi $do_bounds
23034 jmp error_code
23035 CFI_ENDPROC
23036-END(bounds)
23037+ENDPROC(bounds)
23038
23039 ENTRY(invalid_op)
23040 RING0_INT_FRAME
23041@@ -880,7 +1119,7 @@ ENTRY(invalid_op)
23042 pushl_cfi $do_invalid_op
23043 jmp error_code
23044 CFI_ENDPROC
23045-END(invalid_op)
23046+ENDPROC(invalid_op)
23047
23048 ENTRY(coprocessor_segment_overrun)
23049 RING0_INT_FRAME
23050@@ -889,7 +1128,7 @@ ENTRY(coprocessor_segment_overrun)
23051 pushl_cfi $do_coprocessor_segment_overrun
23052 jmp error_code
23053 CFI_ENDPROC
23054-END(coprocessor_segment_overrun)
23055+ENDPROC(coprocessor_segment_overrun)
23056
23057 ENTRY(invalid_TSS)
23058 RING0_EC_FRAME
23059@@ -897,7 +1136,7 @@ ENTRY(invalid_TSS)
23060 pushl_cfi $do_invalid_TSS
23061 jmp error_code
23062 CFI_ENDPROC
23063-END(invalid_TSS)
23064+ENDPROC(invalid_TSS)
23065
23066 ENTRY(segment_not_present)
23067 RING0_EC_FRAME
23068@@ -905,7 +1144,7 @@ ENTRY(segment_not_present)
23069 pushl_cfi $do_segment_not_present
23070 jmp error_code
23071 CFI_ENDPROC
23072-END(segment_not_present)
23073+ENDPROC(segment_not_present)
23074
23075 ENTRY(stack_segment)
23076 RING0_EC_FRAME
23077@@ -913,7 +1152,7 @@ ENTRY(stack_segment)
23078 pushl_cfi $do_stack_segment
23079 jmp error_code
23080 CFI_ENDPROC
23081-END(stack_segment)
23082+ENDPROC(stack_segment)
23083
23084 ENTRY(alignment_check)
23085 RING0_EC_FRAME
23086@@ -921,7 +1160,7 @@ ENTRY(alignment_check)
23087 pushl_cfi $do_alignment_check
23088 jmp error_code
23089 CFI_ENDPROC
23090-END(alignment_check)
23091+ENDPROC(alignment_check)
23092
23093 ENTRY(divide_error)
23094 RING0_INT_FRAME
23095@@ -930,7 +1169,7 @@ ENTRY(divide_error)
23096 pushl_cfi $do_divide_error
23097 jmp error_code
23098 CFI_ENDPROC
23099-END(divide_error)
23100+ENDPROC(divide_error)
23101
23102 #ifdef CONFIG_X86_MCE
23103 ENTRY(machine_check)
23104@@ -940,7 +1179,7 @@ ENTRY(machine_check)
23105 pushl_cfi machine_check_vector
23106 jmp error_code
23107 CFI_ENDPROC
23108-END(machine_check)
23109+ENDPROC(machine_check)
23110 #endif
23111
23112 ENTRY(spurious_interrupt_bug)
23113@@ -950,7 +1189,7 @@ ENTRY(spurious_interrupt_bug)
23114 pushl_cfi $do_spurious_interrupt_bug
23115 jmp error_code
23116 CFI_ENDPROC
23117-END(spurious_interrupt_bug)
23118+ENDPROC(spurious_interrupt_bug)
23119
23120 #ifdef CONFIG_XEN
23121 /* Xen doesn't set %esp to be precisely what the normal sysenter
23122@@ -1056,7 +1295,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23123
23124 ENTRY(mcount)
23125 ret
23126-END(mcount)
23127+ENDPROC(mcount)
23128
23129 ENTRY(ftrace_caller)
23130 cmpl $0, function_trace_stop
23131@@ -1089,7 +1328,7 @@ ftrace_graph_call:
23132 .globl ftrace_stub
23133 ftrace_stub:
23134 ret
23135-END(ftrace_caller)
23136+ENDPROC(ftrace_caller)
23137
23138 ENTRY(ftrace_regs_caller)
23139 pushf /* push flags before compare (in cs location) */
23140@@ -1193,7 +1432,7 @@ trace:
23141 popl %ecx
23142 popl %eax
23143 jmp ftrace_stub
23144-END(mcount)
23145+ENDPROC(mcount)
23146 #endif /* CONFIG_DYNAMIC_FTRACE */
23147 #endif /* CONFIG_FUNCTION_TRACER */
23148
23149@@ -1211,7 +1450,7 @@ ENTRY(ftrace_graph_caller)
23150 popl %ecx
23151 popl %eax
23152 ret
23153-END(ftrace_graph_caller)
23154+ENDPROC(ftrace_graph_caller)
23155
23156 .globl return_to_handler
23157 return_to_handler:
23158@@ -1272,15 +1511,18 @@ error_code:
23159 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23160 REG_TO_PTGS %ecx
23161 SET_KERNEL_GS %ecx
23162- movl $(__USER_DS), %ecx
23163+ movl $(__KERNEL_DS), %ecx
23164 movl %ecx, %ds
23165 movl %ecx, %es
23166+
23167+ pax_enter_kernel
23168+
23169 TRACE_IRQS_OFF
23170 movl %esp,%eax # pt_regs pointer
23171 call *%edi
23172 jmp ret_from_exception
23173 CFI_ENDPROC
23174-END(page_fault)
23175+ENDPROC(page_fault)
23176
23177 /*
23178 * Debug traps and NMI can happen at the one SYSENTER instruction
23179@@ -1323,7 +1565,7 @@ debug_stack_correct:
23180 call do_debug
23181 jmp ret_from_exception
23182 CFI_ENDPROC
23183-END(debug)
23184+ENDPROC(debug)
23185
23186 /*
23187 * NMI is doubly nasty. It can happen _while_ we're handling
23188@@ -1363,6 +1605,9 @@ nmi_stack_correct:
23189 xorl %edx,%edx # zero error code
23190 movl %esp,%eax # pt_regs pointer
23191 call do_nmi
23192+
23193+ pax_exit_kernel
23194+
23195 jmp restore_all_notrace
23196 CFI_ENDPROC
23197
23198@@ -1400,13 +1645,16 @@ nmi_espfix_stack:
23199 FIXUP_ESPFIX_STACK # %eax == %esp
23200 xorl %edx,%edx # zero error code
23201 call do_nmi
23202+
23203+ pax_exit_kernel
23204+
23205 RESTORE_REGS
23206 lss 12+4(%esp), %esp # back to espfix stack
23207 CFI_ADJUST_CFA_OFFSET -24
23208 jmp irq_return
23209 #endif
23210 CFI_ENDPROC
23211-END(nmi)
23212+ENDPROC(nmi)
23213
23214 ENTRY(int3)
23215 RING0_INT_FRAME
23216@@ -1419,14 +1667,14 @@ ENTRY(int3)
23217 call do_int3
23218 jmp ret_from_exception
23219 CFI_ENDPROC
23220-END(int3)
23221+ENDPROC(int3)
23222
23223 ENTRY(general_protection)
23224 RING0_EC_FRAME
23225 pushl_cfi $do_general_protection
23226 jmp error_code
23227 CFI_ENDPROC
23228-END(general_protection)
23229+ENDPROC(general_protection)
23230
23231 #ifdef CONFIG_KVM_GUEST
23232 ENTRY(async_page_fault)
23233@@ -1435,6 +1683,6 @@ ENTRY(async_page_fault)
23234 pushl_cfi $do_async_page_fault
23235 jmp error_code
23236 CFI_ENDPROC
23237-END(async_page_fault)
23238+ENDPROC(async_page_fault)
23239 #endif
23240
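
The largest entry_32.S addition above, pax_erase_kstack, sanitizes the kernel stack on return to userspace: it scans down from the lowest stack pointer recorded for the thread until it finds a long run of the poison fill (marking never-used stack), then re-poisons everything the syscall dirtied. A loose C rendition of what the assembly does (simplified; the real code runs on the exit paths with string instructions):

#include <stddef.h>

#define POISON          ((unsigned long)-0xBEEF)  /* the asm's fill value */
#define POISON_RUN      32              /* the asm checks a 2*16-word run */

static unsigned long *find_low_mark(unsigned long *hint, unsigned long *bottom)
{
        unsigned long *p = hint;
        size_t run = 0;

        while (p > bottom) {            /* scan downward from the last mark */
                if (*--p == POISON) {
                        if (++run == POISON_RUN)
                                return p;       /* below here: never written */
                } else {
                        run = 0;
                }
        }
        return bottom;
}

static void erase_kstack(unsigned long *hint, unsigned long *sp,
                         unsigned long *bottom)
{
        unsigned long *p;

        for (p = find_low_mark(hint, bottom); p < sp; p++)
                *p = POISON;    /* re-poison everything this syscall used */
}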
23241diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23242index c844f08..b07ea0e 100644
23243--- a/arch/x86/kernel/entry_64.S
23244+++ b/arch/x86/kernel/entry_64.S
23245@@ -59,6 +59,8 @@
23246 #include <asm/smap.h>
23247 #include <asm/pgtable_types.h>
23248 #include <linux/err.h>
23249+#include <asm/pgtable.h>
23250+#include <asm/alternative-asm.h>
23251
23252 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23253 #include <linux/elf-em.h>
23254@@ -81,6 +83,430 @@ ENTRY(native_usergs_sysret64)
23255 ENDPROC(native_usergs_sysret64)
23256 #endif /* CONFIG_PARAVIRT */
23257
23258+ .macro ljmpq sel, off
23259+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23260+ .byte 0x48; ljmp *1234f(%rip)
23261+ .pushsection .rodata
23262+ .align 16
23263+ 1234: .quad \off; .word \sel
23264+ .popsection
23265+#else
23266+ pushq $\sel
23267+ pushq $\off
23268+ lretq
23269+#endif
23270+ .endm
23271+
23272+ .macro pax_enter_kernel
23273+ pax_set_fptr_mask
23274+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23275+ call pax_enter_kernel
23276+#endif
23277+ .endm
23278+
23279+ .macro pax_exit_kernel
23280+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23281+ call pax_exit_kernel
23282+#endif
23283+
23284+ .endm
23285+
23286+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23287+ENTRY(pax_enter_kernel)
23288+ pushq %rdi
23289+
23290+#ifdef CONFIG_PARAVIRT
23291+ PV_SAVE_REGS(CLBR_RDI)
23292+#endif
23293+
23294+#ifdef CONFIG_PAX_KERNEXEC
23295+ GET_CR0_INTO_RDI
23296+ bts $16,%rdi
23297+ jnc 3f
23298+ mov %cs,%edi
23299+ cmp $__KERNEL_CS,%edi
23300+ jnz 2f
23301+1:
23302+#endif
23303+
23304+#ifdef CONFIG_PAX_MEMORY_UDEREF
23305+ 661: jmp 111f
23306+ .pushsection .altinstr_replacement, "a"
23307+ 662: ASM_NOP2
23308+ .popsection
23309+ .pushsection .altinstructions, "a"
23310+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23311+ .popsection
23312+ GET_CR3_INTO_RDI
23313+ cmp $0,%dil
23314+ jnz 112f
23315+ mov $__KERNEL_DS,%edi
23316+ mov %edi,%ss
23317+ jmp 111f
23318+112: cmp $1,%dil
23319+ jz 113f
23320+ ud2
23321+113: sub $4097,%rdi
23322+ bts $63,%rdi
23323+ SET_RDI_INTO_CR3
23324+ mov $__UDEREF_KERNEL_DS,%edi
23325+ mov %edi,%ss
23326+111:
23327+#endif
23328+
23329+#ifdef CONFIG_PARAVIRT
23330+ PV_RESTORE_REGS(CLBR_RDI)
23331+#endif
23332+
23333+ popq %rdi
23334+ pax_force_retaddr
23335+ retq
23336+
23337+#ifdef CONFIG_PAX_KERNEXEC
23338+2: ljmpq __KERNEL_CS,1b
23339+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23340+4: SET_RDI_INTO_CR0
23341+ jmp 1b
23342+#endif
23343+ENDPROC(pax_enter_kernel)
23344+
23345+ENTRY(pax_exit_kernel)
23346+ pushq %rdi
23347+
23348+#ifdef CONFIG_PARAVIRT
23349+ PV_SAVE_REGS(CLBR_RDI)
23350+#endif
23351+
23352+#ifdef CONFIG_PAX_KERNEXEC
23353+ mov %cs,%rdi
23354+ cmp $__KERNEXEC_KERNEL_CS,%edi
23355+ jz 2f
23356+ GET_CR0_INTO_RDI
23357+ bts $16,%rdi
23358+ jnc 4f
23359+1:
23360+#endif
23361+
23362+#ifdef CONFIG_PAX_MEMORY_UDEREF
23363+ 661: jmp 111f
23364+ .pushsection .altinstr_replacement, "a"
23365+ 662: ASM_NOP2
23366+ .popsection
23367+ .pushsection .altinstructions, "a"
23368+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23369+ .popsection
23370+ mov %ss,%edi
23371+ cmp $__UDEREF_KERNEL_DS,%edi
23372+ jnz 111f
23373+ GET_CR3_INTO_RDI
23374+ cmp $0,%dil
23375+ jz 112f
23376+ ud2
23377+112: add $4097,%rdi
23378+ bts $63,%rdi
23379+ SET_RDI_INTO_CR3
23380+ mov $__KERNEL_DS,%edi
23381+ mov %edi,%ss
23382+111:
23383+#endif
23384+
23385+#ifdef CONFIG_PARAVIRT
23386+ PV_RESTORE_REGS(CLBR_RDI);
23387+#endif
23388+
23389+ popq %rdi
23390+ pax_force_retaddr
23391+ retq
23392+
23393+#ifdef CONFIG_PAX_KERNEXEC
23394+2: GET_CR0_INTO_RDI
23395+ btr $16,%rdi
23396+ jnc 4f
23397+ ljmpq __KERNEL_CS,3f
23398+3: SET_RDI_INTO_CR0
23399+ jmp 1b
23400+4: ud2
23401+ jmp 4b
23402+#endif
23403+ENDPROC(pax_exit_kernel)
23404+#endif
23405+
23406+ .macro pax_enter_kernel_user
23407+ pax_set_fptr_mask
23408+#ifdef CONFIG_PAX_MEMORY_UDEREF
23409+ call pax_enter_kernel_user
23410+#endif
23411+ .endm
23412+
23413+ .macro pax_exit_kernel_user
23414+#ifdef CONFIG_PAX_MEMORY_UDEREF
23415+ call pax_exit_kernel_user
23416+#endif
23417+#ifdef CONFIG_PAX_RANDKSTACK
23418+ pushq %rax
23419+ pushq %r11
23420+ call pax_randomize_kstack
23421+ popq %r11
23422+ popq %rax
23423+#endif
23424+ .endm
23425+
23426+#ifdef CONFIG_PAX_MEMORY_UDEREF
23427+ENTRY(pax_enter_kernel_user)
23428+ pushq %rdi
23429+ pushq %rbx
23430+
23431+#ifdef CONFIG_PARAVIRT
23432+ PV_SAVE_REGS(CLBR_RDI)
23433+#endif
23434+
23435+ 661: jmp 111f
23436+ .pushsection .altinstr_replacement, "a"
23437+ 662: ASM_NOP2
23438+ .popsection
23439+ .pushsection .altinstructions, "a"
23440+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23441+ .popsection
23442+ GET_CR3_INTO_RDI
23443+ cmp $1,%dil
23444+ jnz 4f
23445+ sub $4097,%rdi
23446+ bts $63,%rdi
23447+ SET_RDI_INTO_CR3
23448+ jmp 3f
23449+111:
23450+
23451+ GET_CR3_INTO_RDI
23452+ mov %rdi,%rbx
23453+ add $__START_KERNEL_map,%rbx
23454+ sub phys_base(%rip),%rbx
23455+
23456+#ifdef CONFIG_PARAVIRT
23457+ cmpl $0, pv_info+PARAVIRT_enabled
23458+ jz 1f
23459+ pushq %rdi
23460+ i = 0
23461+ .rept USER_PGD_PTRS
23462+ mov i*8(%rbx),%rsi
23463+ mov $0,%sil
23464+ lea i*8(%rbx),%rdi
23465+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23466+ i = i + 1
23467+ .endr
23468+ popq %rdi
23469+ jmp 2f
23470+1:
23471+#endif
23472+
23473+ i = 0
23474+ .rept USER_PGD_PTRS
23475+ movb $0,i*8(%rbx)
23476+ i = i + 1
23477+ .endr
23478+
23479+2: SET_RDI_INTO_CR3
23480+
23481+#ifdef CONFIG_PAX_KERNEXEC
23482+ GET_CR0_INTO_RDI
23483+ bts $16,%rdi
23484+ SET_RDI_INTO_CR0
23485+#endif
23486+
23487+3:
23488+
23489+#ifdef CONFIG_PARAVIRT
23490+ PV_RESTORE_REGS(CLBR_RDI)
23491+#endif
23492+
23493+ popq %rbx
23494+ popq %rdi
23495+ pax_force_retaddr
23496+ retq
23497+4: ud2
23498+ENDPROC(pax_enter_kernel_user)
23499+
23500+ENTRY(pax_exit_kernel_user)
23501+ pushq %rdi
23502+ pushq %rbx
23503+
23504+#ifdef CONFIG_PARAVIRT
23505+ PV_SAVE_REGS(CLBR_RDI)
23506+#endif
23507+
23508+ GET_CR3_INTO_RDI
23509+ 661: jmp 1f
23510+ .pushsection .altinstr_replacement, "a"
23511+ 662: ASM_NOP2
23512+ .popsection
23513+ .pushsection .altinstructions, "a"
23514+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23515+ .popsection
23516+ cmp $0,%dil
23517+ jnz 3f
23518+ add $4097,%rdi
23519+ bts $63,%rdi
23520+ SET_RDI_INTO_CR3
23521+ jmp 2f
23522+1:
23523+
23524+ mov %rdi,%rbx
23525+
23526+#ifdef CONFIG_PAX_KERNEXEC
23527+ GET_CR0_INTO_RDI
23528+ btr $16,%rdi
23529+ jnc 3f
23530+ SET_RDI_INTO_CR0
23531+#endif
23532+
23533+ add $__START_KERNEL_map,%rbx
23534+ sub phys_base(%rip),%rbx
23535+
23536+#ifdef CONFIG_PARAVIRT
23537+ cmpl $0, pv_info+PARAVIRT_enabled
23538+ jz 1f
23539+ i = 0
23540+ .rept USER_PGD_PTRS
23541+ mov i*8(%rbx),%rsi
23542+ mov $0x67,%sil
23543+ lea i*8(%rbx),%rdi
23544+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23545+ i = i + 1
23546+ .endr
23547+ jmp 2f
23548+1:
23549+#endif
23550+
23551+ i = 0
23552+ .rept USER_PGD_PTRS
23553+ movb $0x67,i*8(%rbx)
23554+ i = i + 1
23555+ .endr
23556+2:
23557+
23558+#ifdef CONFIG_PARAVIRT
23559+ PV_RESTORE_REGS(CLBR_RDI)
23560+#endif
23561+
23562+ popq %rbx
23563+ popq %rdi
23564+ pax_force_retaddr
23565+ retq
23566+3: ud2
23567+ENDPROC(pax_exit_kernel_user)
23568+#endif
23569+
23570+ .macro pax_enter_kernel_nmi
23571+ pax_set_fptr_mask
23572+
23573+#ifdef CONFIG_PAX_KERNEXEC
23574+ GET_CR0_INTO_RDI
23575+ bts $16,%rdi
23576+ jc 110f
23577+ SET_RDI_INTO_CR0
23578+ or $2,%ebx
23579+110:
23580+#endif
23581+
23582+#ifdef CONFIG_PAX_MEMORY_UDEREF
23583+ 661: jmp 111f
23584+ .pushsection .altinstr_replacement, "a"
23585+ 662: ASM_NOP2
23586+ .popsection
23587+ .pushsection .altinstructions, "a"
23588+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23589+ .popsection
23590+ GET_CR3_INTO_RDI
23591+ cmp $0,%dil
23592+ jz 111f
23593+ sub $4097,%rdi
23594+ or $4,%ebx
23595+ bts $63,%rdi
23596+ SET_RDI_INTO_CR3
23597+ mov $__UDEREF_KERNEL_DS,%edi
23598+ mov %edi,%ss
23599+111:
23600+#endif
23601+ .endm
23602+
23603+ .macro pax_exit_kernel_nmi
23604+#ifdef CONFIG_PAX_KERNEXEC
23605+ btr $1,%ebx
23606+ jnc 110f
23607+ GET_CR0_INTO_RDI
23608+ btr $16,%rdi
23609+ SET_RDI_INTO_CR0
23610+110:
23611+#endif
23612+
23613+#ifdef CONFIG_PAX_MEMORY_UDEREF
23614+ btr $2,%ebx
23615+ jnc 111f
23616+ GET_CR3_INTO_RDI
23617+ add $4097,%rdi
23618+ bts $63,%rdi
23619+ SET_RDI_INTO_CR3
23620+ mov $__KERNEL_DS,%edi
23621+ mov %edi,%ss
23622+111:
23623+#endif
23624+ .endm
23625+
23626+ .macro pax_erase_kstack
23627+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23628+ call pax_erase_kstack
23629+#endif
23630+ .endm
23631+
23632+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23633+ENTRY(pax_erase_kstack)
23634+ pushq %rdi
23635+ pushq %rcx
23636+ pushq %rax
23637+ pushq %r11
23638+
23639+ GET_THREAD_INFO(%r11)
23640+ mov TI_lowest_stack(%r11), %rdi
23641+ mov $-0xBEEF, %rax
23642+ std
23643+
23644+1: mov %edi, %ecx
23645+ and $THREAD_SIZE_asm - 1, %ecx
23646+ shr $3, %ecx
23647+ repne scasq
23648+ jecxz 2f
23649+
23650+ cmp $2*8, %ecx
23651+ jc 2f
23652+
23653+ mov $2*8, %ecx
23654+ repe scasq
23655+ jecxz 2f
23656+ jne 1b
23657+
23658+2: cld
23659+ mov %esp, %ecx
23660+ sub %edi, %ecx
23661+
23662+ cmp $THREAD_SIZE_asm, %rcx
23663+ jb 3f
23664+ ud2
23665+3:
23666+
23667+ shr $3, %ecx
23668+ rep stosq
23669+
23670+ mov TI_task_thread_sp0(%r11), %rdi
23671+ sub $256, %rdi
23672+ mov %rdi, TI_lowest_stack(%r11)
23673+
23674+ popq %r11
23675+ popq %rax
23676+ popq %rcx
23677+ popq %rdi
23678+ pax_force_retaddr
23679+ ret
23680+ENDPROC(pax_erase_kstack)
23681+#endif
23682
23683 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23684 #ifdef CONFIG_TRACE_IRQFLAGS
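
The UDEREF blocks in pax_enter_kernel/pax_exit_kernel above encode the kernel/user page-table switch as CR3 arithmetic: each CPU owns two adjacent page-aligned PGDs (kernel first, user second), the kernel runs under PCID 0 and user space under PCID 1, so a single sub/add of 4097 retargets both the PGD pointer and the PCID field at once, and bts $63 sets the CR3 no-flush bit so PCID-tagged TLB entries survive the write. The altinstruction_entry lines NOP out the leading jump only on CPUs with X86_FEATURE_PCID, so the block executes only where PCIDs exist; the %ss reloads (__UDEREF_KERNEL_DS vs __KERNEL_DS) track which view is active. A minimal C sketch of the idea, with hypothetical names; the layout assumptions come from reading the assembly, not from anything the patch states:

extern unsigned long read_cr3(void);	/* as in asm/special_insns.h */
extern void write_cr3(unsigned long);

#define PCID_USER	1UL
#define CR3_NOFLUSH	(1UL << 63)	/* don't flush this PCID's TLB entries */
#define PGD_GAP		4096UL		/* user PGD sits one page above the kernel PGD */

static inline void pax_enter_kernel_sketch(void)
{
	unsigned long cr3 = read_cr3();

	if ((cr3 & 0xfff) == PCID_USER)	/* came from user space */
		write_cr3((cr3 - (PGD_GAP + PCID_USER)) | CR3_NOFLUSH);
}

static inline void pax_exit_kernel_sketch(void)
{
	unsigned long cr3 = read_cr3();

	if ((cr3 & 0xfff) == 0)		/* currently on the kernel PGD */
		write_cr3((cr3 + PGD_GAP + PCID_USER) | CR3_NOFLUSH);
}
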
23685@@ -117,7 +543,7 @@ ENDPROC(native_usergs_sysret64)
23686 .endm
23687
23688 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23689- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23690+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23691 jnc 1f
23692 TRACE_IRQS_ON_DEBUG
23693 1:
23694@@ -155,27 +581,6 @@ ENDPROC(native_usergs_sysret64)
23695 movq \tmp,R11+\offset(%rsp)
23696 .endm
23697
23698- .macro FAKE_STACK_FRAME child_rip
23699- /* push in order ss, rsp, eflags, cs, rip */
23700- xorl %eax, %eax
23701- pushq_cfi $__KERNEL_DS /* ss */
23702- /*CFI_REL_OFFSET ss,0*/
23703- pushq_cfi %rax /* rsp */
23704- CFI_REL_OFFSET rsp,0
23705- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23706- /*CFI_REL_OFFSET rflags,0*/
23707- pushq_cfi $__KERNEL_CS /* cs */
23708- /*CFI_REL_OFFSET cs,0*/
23709- pushq_cfi \child_rip /* rip */
23710- CFI_REL_OFFSET rip,0
23711- pushq_cfi %rax /* orig rax */
23712- .endm
23713-
23714- .macro UNFAKE_STACK_FRAME
23715- addq $8*6, %rsp
23716- CFI_ADJUST_CFA_OFFSET -(6*8)
23717- .endm
23718-
23719 /*
23720 * initial frame state for interrupts (and exceptions without error code)
23721 */
23722@@ -242,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23723 /* save partial stack frame */
23724 .macro SAVE_ARGS_IRQ
23725 cld
23726- /* start from rbp in pt_regs and jump over */
23727- movq_cfi rdi, (RDI-RBP)
23728- movq_cfi rsi, (RSI-RBP)
23729- movq_cfi rdx, (RDX-RBP)
23730- movq_cfi rcx, (RCX-RBP)
23731- movq_cfi rax, (RAX-RBP)
23732- movq_cfi r8, (R8-RBP)
23733- movq_cfi r9, (R9-RBP)
23734- movq_cfi r10, (R10-RBP)
23735- movq_cfi r11, (R11-RBP)
23736+ /* start from r15 in pt_regs and jump over */
23737+ movq_cfi rdi, RDI
23738+ movq_cfi rsi, RSI
23739+ movq_cfi rdx, RDX
23740+ movq_cfi rcx, RCX
23741+ movq_cfi rax, RAX
23742+ movq_cfi r8, R8
23743+ movq_cfi r9, R9
23744+ movq_cfi r10, R10
23745+ movq_cfi r11, R11
23746+ movq_cfi r12, R12
23747
23748 /* Save rbp so that we can unwind from get_irq_regs() */
23749- movq_cfi rbp, 0
23750+ movq_cfi rbp, RBP
23751
23752 /* Save previous stack value */
23753 movq %rsp, %rsi
23754
23755- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23756- testl $3, CS-RBP(%rsi)
23757+ movq %rsp,%rdi /* arg1 for handler */
23758+ testb $3, CS(%rsi)
23759 je 1f
23760 SWAPGS
23761 /*
23762@@ -280,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23763 0x06 /* DW_OP_deref */, \
23764 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23765 0x22 /* DW_OP_plus */
23766+
23767+#ifdef CONFIG_PAX_MEMORY_UDEREF
23768+ testb $3, CS(%rdi)
23769+ jnz 1f
23770+ pax_enter_kernel
23771+ jmp 2f
23772+1: pax_enter_kernel_user
23773+2:
23774+#else
23775+ pax_enter_kernel
23776+#endif
23777+
23778 /* We entered an interrupt context - irqs are off: */
23779 TRACE_IRQS_OFF
23780 .endm
23781@@ -309,9 +727,52 @@ ENTRY(save_paranoid)
23782 js 1f /* negative -> in kernel */
23783 SWAPGS
23784 xorl %ebx,%ebx
23785-1: ret
23786+1:
23787+#ifdef CONFIG_PAX_MEMORY_UDEREF
23788+ testb $3, CS+8(%rsp)
23789+ jnz 1f
23790+ pax_enter_kernel
23791+ jmp 2f
23792+1: pax_enter_kernel_user
23793+2:
23794+#else
23795+ pax_enter_kernel
23796+#endif
23797+ pax_force_retaddr
23798+ ret
23799 CFI_ENDPROC
23800-END(save_paranoid)
23801+ENDPROC(save_paranoid)
23802+
23803+ENTRY(save_paranoid_nmi)
23804+ XCPT_FRAME 1 RDI+8
23805+ cld
23806+ movq_cfi rdi, RDI+8
23807+ movq_cfi rsi, RSI+8
23808+ movq_cfi rdx, RDX+8
23809+ movq_cfi rcx, RCX+8
23810+ movq_cfi rax, RAX+8
23811+ movq_cfi r8, R8+8
23812+ movq_cfi r9, R9+8
23813+ movq_cfi r10, R10+8
23814+ movq_cfi r11, R11+8
23815+ movq_cfi rbx, RBX+8
23816+ movq_cfi rbp, RBP+8
23817+ movq_cfi r12, R12+8
23818+ movq_cfi r13, R13+8
23819+ movq_cfi r14, R14+8
23820+ movq_cfi r15, R15+8
23821+ movl $1,%ebx
23822+ movl $MSR_GS_BASE,%ecx
23823+ rdmsr
23824+ testl %edx,%edx
23825+ js 1f /* negative -> in kernel */
23826+ SWAPGS
23827+ xorl %ebx,%ebx
23828+1: pax_enter_kernel_nmi
23829+ pax_force_retaddr
23830+ ret
23831+ CFI_ENDPROC
23832+ENDPROC(save_paranoid_nmi)
23833
23834 /*
23835 * A newly forked process directly context switches into this address.
23836@@ -332,7 +793,7 @@ ENTRY(ret_from_fork)
23837
23838 RESTORE_REST
23839
23840- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23841+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23842 jz 1f
23843
23844 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23845@@ -342,15 +803,13 @@ ENTRY(ret_from_fork)
23846 jmp ret_from_sys_call # go to the SYSRET fastpath
23847
23848 1:
23849- subq $REST_SKIP, %rsp # leave space for volatiles
23850- CFI_ADJUST_CFA_OFFSET REST_SKIP
23851 movq %rbp, %rdi
23852 call *%rbx
23853 movl $0, RAX(%rsp)
23854 RESTORE_REST
23855 jmp int_ret_from_sys_call
23856 CFI_ENDPROC
23857-END(ret_from_fork)
23858+ENDPROC(ret_from_fork)
23859
23860 /*
23861 * System call entry. Up to 6 arguments in registers are supported.
23862@@ -387,7 +846,7 @@ END(ret_from_fork)
23863 ENTRY(system_call)
23864 CFI_STARTPROC simple
23865 CFI_SIGNAL_FRAME
23866- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23867+ CFI_DEF_CFA rsp,0
23868 CFI_REGISTER rip,rcx
23869 /*CFI_REGISTER rflags,r11*/
23870 SWAPGS_UNSAFE_STACK
23871@@ -400,16 +859,23 @@ GLOBAL(system_call_after_swapgs)
23872
23873 movq %rsp,PER_CPU_VAR(old_rsp)
23874 movq PER_CPU_VAR(kernel_stack),%rsp
23875+ SAVE_ARGS 8*6,0
23876+ pax_enter_kernel_user
23877+
23878+#ifdef CONFIG_PAX_RANDKSTACK
23879+ pax_erase_kstack
23880+#endif
23881+
23882 /*
23883 * No need to follow this irqs off/on section - it's straight
23884 * and short:
23885 */
23886 ENABLE_INTERRUPTS(CLBR_NONE)
23887- SAVE_ARGS 8,0
23888 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
23889 movq %rcx,RIP-ARGOFFSET(%rsp)
23890 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23891- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23892+ GET_THREAD_INFO(%rcx)
23893+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23894 jnz tracesys
23895 system_call_fastpath:
23896 #if __SYSCALL_MASK == ~0
23897@@ -433,10 +899,13 @@ sysret_check:
23898 LOCKDEP_SYS_EXIT
23899 DISABLE_INTERRUPTS(CLBR_NONE)
23900 TRACE_IRQS_OFF
23901- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23902+ GET_THREAD_INFO(%rcx)
23903+ movl TI_flags(%rcx),%edx
23904 andl %edi,%edx
23905 jnz sysret_careful
23906 CFI_REMEMBER_STATE
23907+ pax_exit_kernel_user
23908+ pax_erase_kstack
23909 /*
23910 * sysretq will re-enable interrupts:
23911 */
23912@@ -495,6 +964,9 @@ auditsys:
23913 movq %rax,%rsi /* 2nd arg: syscall number */
23914 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
23915 call __audit_syscall_entry
23916+
23917+ pax_erase_kstack
23918+
23919 LOAD_ARGS 0 /* reload call-clobbered registers */
23920 jmp system_call_fastpath
23921
23922@@ -516,7 +988,7 @@ sysret_audit:
23923 /* Do syscall tracing */
23924 tracesys:
23925 #ifdef CONFIG_AUDITSYSCALL
23926- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23927+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
23928 jz auditsys
23929 #endif
23930 SAVE_REST
23931@@ -524,12 +996,15 @@ tracesys:
23932 FIXUP_TOP_OF_STACK %rdi
23933 movq %rsp,%rdi
23934 call syscall_trace_enter
23935+
23936+ pax_erase_kstack
23937+
23938 /*
23939 * Reload arg registers from stack in case ptrace changed them.
23940 * We don't reload %rax because syscall_trace_enter() returned
23941 * the value it wants us to use in the table lookup.
23942 */
23943- LOAD_ARGS ARGOFFSET, 1
23944+ LOAD_ARGS 1
23945 RESTORE_REST
23946 #if __SYSCALL_MASK == ~0
23947 cmpq $__NR_syscall_max,%rax
23948@@ -559,7 +1034,9 @@ GLOBAL(int_with_check)
23949 andl %edi,%edx
23950 jnz int_careful
23951 andl $~TS_COMPAT,TI_status(%rcx)
23952- jmp retint_swapgs
23953+ pax_exit_kernel_user
23954+ pax_erase_kstack
23955+ jmp retint_swapgs_pax
23956
23957 /* Either reschedule or signal or syscall exit tracking needed. */
23958 /* First do a reschedule test. */
23959@@ -605,7 +1082,7 @@ int_restore_rest:
23960 TRACE_IRQS_OFF
23961 jmp int_with_check
23962 CFI_ENDPROC
23963-END(system_call)
23964+ENDPROC(system_call)
23965
23966 .macro FORK_LIKE func
23967 ENTRY(stub_\func)
23968@@ -618,9 +1095,10 @@ ENTRY(stub_\func)
23969 DEFAULT_FRAME 0 8 /* offset 8: return address */
23970 call sys_\func
23971 RESTORE_TOP_OF_STACK %r11, 8
23972- ret $REST_SKIP /* pop extended registers */
23973+ pax_force_retaddr
23974+ ret
23975 CFI_ENDPROC
23976-END(stub_\func)
23977+ENDPROC(stub_\func)
23978 .endm
23979
23980 .macro FIXED_FRAME label,func
23981@@ -630,9 +1108,10 @@ ENTRY(\label)
23982 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23983 call \func
23984 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23985+ pax_force_retaddr
23986 ret
23987 CFI_ENDPROC
23988-END(\label)
23989+ENDPROC(\label)
23990 .endm
23991
23992 FORK_LIKE clone
23993@@ -640,19 +1119,6 @@ END(\label)
23994 FORK_LIKE vfork
23995 FIXED_FRAME stub_iopl, sys_iopl
23996
23997-ENTRY(ptregscall_common)
23998- DEFAULT_FRAME 1 8 /* offset 8: return address */
23999- RESTORE_TOP_OF_STACK %r11, 8
24000- movq_cfi_restore R15+8, r15
24001- movq_cfi_restore R14+8, r14
24002- movq_cfi_restore R13+8, r13
24003- movq_cfi_restore R12+8, r12
24004- movq_cfi_restore RBP+8, rbp
24005- movq_cfi_restore RBX+8, rbx
24006- ret $REST_SKIP /* pop extended registers */
24007- CFI_ENDPROC
24008-END(ptregscall_common)
24009-
24010 ENTRY(stub_execve)
24011 CFI_STARTPROC
24012 addq $8, %rsp
24013@@ -664,7 +1130,7 @@ ENTRY(stub_execve)
24014 RESTORE_REST
24015 jmp int_ret_from_sys_call
24016 CFI_ENDPROC
24017-END(stub_execve)
24018+ENDPROC(stub_execve)
24019
24020 /*
24021 * sigreturn is special because it needs to restore all registers on return.
24022@@ -681,7 +1147,7 @@ ENTRY(stub_rt_sigreturn)
24023 RESTORE_REST
24024 jmp int_ret_from_sys_call
24025 CFI_ENDPROC
24026-END(stub_rt_sigreturn)
24027+ENDPROC(stub_rt_sigreturn)
24028
24029 #ifdef CONFIG_X86_X32_ABI
24030 ENTRY(stub_x32_rt_sigreturn)
24031@@ -695,7 +1161,7 @@ ENTRY(stub_x32_rt_sigreturn)
24032 RESTORE_REST
24033 jmp int_ret_from_sys_call
24034 CFI_ENDPROC
24035-END(stub_x32_rt_sigreturn)
24036+ENDPROC(stub_x32_rt_sigreturn)
24037
24038 ENTRY(stub_x32_execve)
24039 CFI_STARTPROC
24040@@ -709,7 +1175,7 @@ ENTRY(stub_x32_execve)
24041 RESTORE_REST
24042 jmp int_ret_from_sys_call
24043 CFI_ENDPROC
24044-END(stub_x32_execve)
24045+ENDPROC(stub_x32_execve)
24046
24047 #endif
24048
24049@@ -746,7 +1212,7 @@ vector=vector+1
24050 2: jmp common_interrupt
24051 .endr
24052 CFI_ENDPROC
24053-END(irq_entries_start)
24054+ENDPROC(irq_entries_start)
24055
24056 .previous
24057 END(interrupt)
24058@@ -763,8 +1229,8 @@ END(interrupt)
24059 /* 0(%rsp): ~(interrupt number) */
24060 .macro interrupt func
24061 /* reserve pt_regs for scratch regs and rbp */
24062- subq $ORIG_RAX-RBP, %rsp
24063- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24064+ subq $ORIG_RAX, %rsp
24065+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24066 SAVE_ARGS_IRQ
24067 call \func
24068 .endm
24069@@ -787,14 +1253,14 @@ ret_from_intr:
24070
24071 /* Restore saved previous stack */
24072 popq %rsi
24073- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24074- leaq ARGOFFSET-RBP(%rsi), %rsp
24075+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24076+ movq %rsi, %rsp
24077 CFI_DEF_CFA_REGISTER rsp
24078- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24079+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24080
24081 exit_intr:
24082 GET_THREAD_INFO(%rcx)
24083- testl $3,CS-ARGOFFSET(%rsp)
24084+ testb $3,CS-ARGOFFSET(%rsp)
24085 je retint_kernel
24086
24087 /* Interrupt came from user space */
24088@@ -816,12 +1282,16 @@ retint_swapgs: /* return to user-space */
24089 * The iretq could re-enable interrupts:
24090 */
24091 DISABLE_INTERRUPTS(CLBR_ANY)
24092+ pax_exit_kernel_user
24093+retint_swapgs_pax:
24094 TRACE_IRQS_IRETQ
24095 SWAPGS
24096 jmp restore_args
24097
24098 retint_restore_args: /* return to kernel space */
24099 DISABLE_INTERRUPTS(CLBR_ANY)
24100+ pax_exit_kernel
24101+ pax_force_retaddr (RIP-ARGOFFSET)
24102 /*
24103 * The iretq could re-enable interrupts:
24104 */
24105@@ -934,7 +1404,7 @@ ENTRY(retint_kernel)
24106 jmp exit_intr
24107 #endif
24108 CFI_ENDPROC
24109-END(common_interrupt)
24110+ENDPROC(common_interrupt)
24111
24112 /*
24113 * If IRET takes a fault on the espfix stack, then we
24114@@ -956,13 +1426,13 @@ __do_double_fault:
24115 cmpq $native_irq_return_iret,%rax
24116 jne do_double_fault /* This shouldn't happen... */
24117 movq PER_CPU_VAR(kernel_stack),%rax
24118- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
24119+ subq $(6*8),%rax /* Reset to original stack */
24120 movq %rax,RSP(%rdi)
24121 movq $0,(%rax) /* Missing (lost) #GP error code */
24122 movq $general_protection,RIP(%rdi)
24123 retq
24124 CFI_ENDPROC
24125-END(__do_double_fault)
24126+ENDPROC(__do_double_fault)
24127 #else
24128 # define __do_double_fault do_double_fault
24129 #endif
24130@@ -979,7 +1449,7 @@ ENTRY(\sym)
24131 interrupt \do_sym
24132 jmp ret_from_intr
24133 CFI_ENDPROC
24134-END(\sym)
24135+ENDPROC(\sym)
24136 .endm
24137
24138 #ifdef CONFIG_TRACING
24139@@ -1052,7 +1522,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24140 /*
24141 * Exception entry points.
24142 */
24143-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24144+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24145
24146 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24147 ENTRY(\sym)
24148@@ -1103,6 +1573,12 @@ ENTRY(\sym)
24149 .endif
24150
24151 .if \shift_ist != -1
24152+#ifdef CONFIG_SMP
24153+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24154+ lea init_tss(%r13), %r13
24155+#else
24156+ lea init_tss(%rip), %r13
24157+#endif
24158 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24159 .endif
24160
24161@@ -1119,7 +1595,7 @@ ENTRY(\sym)
24162 .endif
24163
24164 CFI_ENDPROC
24165-END(\sym)
24166+ENDPROC(\sym)
24167 .endm
24168
24169 #ifdef CONFIG_TRACING
24170@@ -1160,9 +1636,10 @@ gs_change:
24171 2: mfence /* workaround */
24172 SWAPGS
24173 popfq_cfi
24174+ pax_force_retaddr
24175 ret
24176 CFI_ENDPROC
24177-END(native_load_gs_index)
24178+ENDPROC(native_load_gs_index)
24179
24180 _ASM_EXTABLE(gs_change,bad_gs)
24181 .section .fixup,"ax"
24182@@ -1190,9 +1667,10 @@ ENTRY(do_softirq_own_stack)
24183 CFI_DEF_CFA_REGISTER rsp
24184 CFI_ADJUST_CFA_OFFSET -8
24185 decl PER_CPU_VAR(irq_count)
24186+ pax_force_retaddr
24187 ret
24188 CFI_ENDPROC
24189-END(do_softirq_own_stack)
24190+ENDPROC(do_softirq_own_stack)
24191
24192 #ifdef CONFIG_XEN
24193 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24194@@ -1230,7 +1708,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24195 decl PER_CPU_VAR(irq_count)
24196 jmp error_exit
24197 CFI_ENDPROC
24198-END(xen_do_hypervisor_callback)
24199+ENDPROC(xen_do_hypervisor_callback)
24200
24201 /*
24202 * Hypervisor uses this for application faults while it executes.
24203@@ -1289,7 +1767,7 @@ ENTRY(xen_failsafe_callback)
24204 SAVE_ALL
24205 jmp error_exit
24206 CFI_ENDPROC
24207-END(xen_failsafe_callback)
24208+ENDPROC(xen_failsafe_callback)
24209
24210 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24211 xen_hvm_callback_vector xen_evtchn_do_upcall
24212@@ -1336,18 +1814,33 @@ ENTRY(paranoid_exit)
24213 DEFAULT_FRAME
24214 DISABLE_INTERRUPTS(CLBR_NONE)
24215 TRACE_IRQS_OFF_DEBUG
24216- testl %ebx,%ebx /* swapgs needed? */
24217+ testl $1,%ebx /* swapgs needed? */
24218 jnz paranoid_restore
24219- testl $3,CS(%rsp)
24220+ testb $3,CS(%rsp)
24221 jnz paranoid_userspace
24222+#ifdef CONFIG_PAX_MEMORY_UDEREF
24223+ pax_exit_kernel
24224+ TRACE_IRQS_IRETQ 0
24225+ SWAPGS_UNSAFE_STACK
24226+ RESTORE_ALL 8
24227+ pax_force_retaddr_bts
24228+ jmp irq_return
24229+#endif
24230 paranoid_swapgs:
24231+#ifdef CONFIG_PAX_MEMORY_UDEREF
24232+ pax_exit_kernel_user
24233+#else
24234+ pax_exit_kernel
24235+#endif
24236 TRACE_IRQS_IRETQ 0
24237 SWAPGS_UNSAFE_STACK
24238 RESTORE_ALL 8
24239 jmp irq_return
24240 paranoid_restore:
24241+ pax_exit_kernel
24242 TRACE_IRQS_IRETQ_DEBUG 0
24243 RESTORE_ALL 8
24244+ pax_force_retaddr_bts
24245 jmp irq_return
24246 paranoid_userspace:
24247 GET_THREAD_INFO(%rcx)
24248@@ -1376,7 +1869,7 @@ paranoid_schedule:
24249 TRACE_IRQS_OFF
24250 jmp paranoid_userspace
24251 CFI_ENDPROC
24252-END(paranoid_exit)
24253+ENDPROC(paranoid_exit)
24254
24255 /*
24256 * Exception entry point. This expects an error code/orig_rax on the stack.
24257@@ -1403,12 +1896,23 @@ ENTRY(error_entry)
24258 movq_cfi r14, R14+8
24259 movq_cfi r15, R15+8
24260 xorl %ebx,%ebx
24261- testl $3,CS+8(%rsp)
24262+ testb $3,CS+8(%rsp)
24263 je error_kernelspace
24264 error_swapgs:
24265 SWAPGS
24266 error_sti:
24267+#ifdef CONFIG_PAX_MEMORY_UDEREF
24268+ testb $3, CS+8(%rsp)
24269+ jnz 1f
24270+ pax_enter_kernel
24271+ jmp 2f
24272+1: pax_enter_kernel_user
24273+2:
24274+#else
24275+ pax_enter_kernel
24276+#endif
24277 TRACE_IRQS_OFF
24278+ pax_force_retaddr
24279 ret
24280
24281 /*
24282@@ -1435,7 +1939,7 @@ bstep_iret:
24283 movq %rcx,RIP+8(%rsp)
24284 jmp error_swapgs
24285 CFI_ENDPROC
24286-END(error_entry)
24287+ENDPROC(error_entry)
24288
24289
24290 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24291@@ -1446,7 +1950,7 @@ ENTRY(error_exit)
24292 DISABLE_INTERRUPTS(CLBR_NONE)
24293 TRACE_IRQS_OFF
24294 GET_THREAD_INFO(%rcx)
24295- testl %eax,%eax
24296+ testl $1,%eax
24297 jne retint_kernel
24298 LOCKDEP_SYS_EXIT_IRQ
24299 movl TI_flags(%rcx),%edx
24300@@ -1455,7 +1959,7 @@ ENTRY(error_exit)
24301 jnz retint_careful
24302 jmp retint_swapgs
24303 CFI_ENDPROC
24304-END(error_exit)
24305+ENDPROC(error_exit)
24306
24307 /*
24308 * Test if a given stack is an NMI stack or not.
24309@@ -1513,9 +2017,11 @@ ENTRY(nmi)
24310 * If %cs was not the kernel segment, then the NMI triggered in user
24311 * space, which means it is definitely not nested.
24312 */
24313+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24314+ je 1f
24315 cmpl $__KERNEL_CS, 16(%rsp)
24316 jne first_nmi
24317-
24318+1:
24319 /*
24320 * Check the special variable on the stack to see if NMIs are
24321 * executing.
24322@@ -1549,8 +2055,7 @@ nested_nmi:
24323
24324 1:
24325 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24326- leaq -1*8(%rsp), %rdx
24327- movq %rdx, %rsp
24328+ subq $8, %rsp
24329 CFI_ADJUST_CFA_OFFSET 1*8
24330 leaq -10*8(%rsp), %rdx
24331 pushq_cfi $__KERNEL_DS
24332@@ -1568,6 +2073,7 @@ nested_nmi_out:
24333 CFI_RESTORE rdx
24334
24335 /* No need to check faults here */
24336+# pax_force_retaddr_bts
24337 INTERRUPT_RETURN
24338
24339 CFI_RESTORE_STATE
24340@@ -1664,13 +2170,13 @@ end_repeat_nmi:
24341 subq $ORIG_RAX-R15, %rsp
24342 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24343 /*
24344- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24345+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24346 * as we should not be calling schedule in NMI context.
24347 * Even with normal interrupts enabled. An NMI should not be
24348 * setting NEED_RESCHED or anything that normal interrupts and
24349 * exceptions might do.
24350 */
24351- call save_paranoid
24352+ call save_paranoid_nmi
24353 DEFAULT_FRAME 0
24354
24355 /*
24356@@ -1680,9 +2186,9 @@ end_repeat_nmi:
24357 * NMI itself takes a page fault, the page fault that was preempted
24358 * will read the information from the NMI page fault and not the
24359 * origin fault. Save it off and restore it if it changes.
24360- * Use the r12 callee-saved register.
24361+ * Use the r13 callee-saved register.
24362 */
24363- movq %cr2, %r12
24364+ movq %cr2, %r13
24365
24366 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24367 movq %rsp,%rdi
24368@@ -1691,29 +2197,34 @@ end_repeat_nmi:
24369
24370 /* Did the NMI take a page fault? Restore cr2 if it did */
24371 movq %cr2, %rcx
24372- cmpq %rcx, %r12
24373+ cmpq %rcx, %r13
24374 je 1f
24375- movq %r12, %cr2
24376+ movq %r13, %cr2
24377 1:
24378
24379- testl %ebx,%ebx /* swapgs needed? */
24380+ testl $1,%ebx /* swapgs needed? */
24381 jnz nmi_restore
24382 nmi_swapgs:
24383 SWAPGS_UNSAFE_STACK
24384 nmi_restore:
24385+ pax_exit_kernel_nmi
24386 /* Pop the extra iret frame at once */
24387 RESTORE_ALL 6*8
24388+ testb $3, 8(%rsp)
24389+ jnz 1f
24390+ pax_force_retaddr_bts
24391+1:
24392
24393 /* Clear the NMI executing stack variable */
24394 movq $0, 5*8(%rsp)
24395 jmp irq_return
24396 CFI_ENDPROC
24397-END(nmi)
24398+ENDPROC(nmi)
24399
24400 ENTRY(ignore_sysret)
24401 CFI_STARTPROC
24402 mov $-ENOSYS,%eax
24403 sysret
24404 CFI_ENDPROC
24405-END(ignore_sysret)
24406+ENDPROC(ignore_sysret)
24407
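
pax_erase_kstack, added to this file above, is the STACKLEAK eraser for the 64-bit entry paths: between syscalls it re-fills the used portion of the kernel stack with a poison value (-0xBEEF per qword) so that any uninitialised-stack leak to user space only ever reveals poison. The scan runs downward (std) from the lowest stack pointer recorded in thread_info and demands a run of matching qwords before trusting that it found the old poison boundary (the repne scasq / repe scasq pair), then re-poisons everything up to the live %rsp. A C sketch of the same logic, with stand-in names for the thread_info fields (the real code reads TI_lowest_stack and TI_task_thread_sp0):

#define STACKLEAK_POISON	(-0xBEEFL)

struct ti_sketch {			/* stand-in for struct thread_info */
	unsigned long lowest_stack;	/* lowest %rsp seen since last erase */
	unsigned long sp0;		/* top of the kernel stack */
};

static void pax_erase_kstack_sketch(struct ti_sketch *ti,
				    unsigned long rsp, unsigned long stack_bottom)
{
	unsigned long *p = (unsigned long *)ti->lowest_stack;
	int run = 0;

	/* scan downward until a run of poison qwords is found, mirroring
	 * the std / repne scasq / repe scasq sequence above */
	while ((unsigned long)p > stack_bottom && run < 16) {
		run = (*p == STACKLEAK_POISON) ? run + 1 : 0;
		p--;
	}

	/* re-poison everything between the poison boundary and the live
	 * stack pointer; the assembly also traps (ud2) if this span ever
	 * exceeds THREAD_SIZE */
	while ((unsigned long)p < rsp)
		*p++ = STACKLEAK_POISON;

	/* restart tracking near the top of the stack; the STACKLEAK
	 * gcc-plugin instrumentation drags lowest_stack down again as
	 * deep call chains execute */
	ti->lowest_stack = ti->sp0 - 256;
}
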
24408diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24409index 94d857f..bf1f0bf 100644
24410--- a/arch/x86/kernel/espfix_64.c
24411+++ b/arch/x86/kernel/espfix_64.c
24412@@ -197,7 +197,7 @@ void init_espfix_ap(void)
24413 set_pte(&pte_p[n*PTE_STRIDE], pte);
24414
24415 /* Job is done for this CPU and any CPU which shares this page */
24416- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24417+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24418
24419 unlock_done:
24420 mutex_unlock(&espfix_init_mutex);
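
The one-liner above is part of grsecurity's constification scheme: ACCESS_ONCE() is made const-qualified, so a plain load stays legal but an accidental store through it becomes a compile error, and intentional writes must be spelled ACCESS_ONCE_RW(). Plausible shape of the pair (the authoritative definitions are in the include/linux/compiler.h hunks elsewhere in this patch):

#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))
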
24421diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24422index cbc4a91..b38ee45 100644
24423--- a/arch/x86/kernel/ftrace.c
24424+++ b/arch/x86/kernel/ftrace.c
24425@@ -88,7 +88,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24426 * kernel identity mapping to modify code.
24427 */
24428 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24429- ip = (unsigned long)__va(__pa_symbol(ip));
24430+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24431
24432 return ip;
24433 }
24434@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24435 {
24436 unsigned char replaced[MCOUNT_INSN_SIZE];
24437
24438+ ip = ktla_ktva(ip);
24439+
24440 /*
24441 * Note: Due to modules and __init, code can
24442 * disappear and change, we need to protect against faulting
24443@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24444 unsigned char old[MCOUNT_INSN_SIZE];
24445 int ret;
24446
24447- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24448+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24449
24450 ftrace_update_func = ip;
24451 /* Make sure the breakpoints see the ftrace_update_func update */
24452@@ -310,7 +312,7 @@ static int add_break(unsigned long ip, const char *old)
24453 unsigned char replaced[MCOUNT_INSN_SIZE];
24454 unsigned char brk = BREAKPOINT_INSTRUCTION;
24455
24456- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24457+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24458 return -EFAULT;
24459
24460 /* Make sure it is what we expect it to be */
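
The ftrace changes above all funnel instruction addresses through ktla_ktva() before the bytes are read or compared. Under KERNEXEC on i386 the kernel executes its text through an alias at a different linear address than the writable mapping, so an instruction pointer taken at run time must be translated back before it can be dereferenced as data. Illustrative shape only; KERNEXEC_TEXT_OFFSET below is a stand-in for whatever displacement this patch establishes in its asm/pgtable*.h hunks, and without KERNEXEC both macros collapse to the identity:

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr)	((addr) + KERNEXEC_TEXT_OFFSET)	/* exec alias -> data view */
#define ktva_ktla(addr)	((addr) - KERNEXEC_TEXT_OFFSET)	/* data view -> exec alias */
#else
#define ktla_ktva(addr)	(addr)
#define ktva_ktla(addr)	(addr)
#endif
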
24461diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24462index eda1a86..8f6df48 100644
24463--- a/arch/x86/kernel/head64.c
24464+++ b/arch/x86/kernel/head64.c
24465@@ -67,12 +67,12 @@ again:
24466 pgd = *pgd_p;
24467
24468 /*
24469- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24470- * critical -- __PAGE_OFFSET would point us back into the dynamic
24471+ * The use of __early_va rather than __va here is critical:
24472+ * __va would point us back into the dynamic
24473 * range and we might end up looping forever...
24474 */
24475 if (pgd)
24476- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24477+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24478 else {
24479 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24480 reset_early_page_tables();
24481@@ -82,13 +82,13 @@ again:
24482 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24483 for (i = 0; i < PTRS_PER_PUD; i++)
24484 pud_p[i] = 0;
24485- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24486+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24487 }
24488 pud_p += pud_index(address);
24489 pud = *pud_p;
24490
24491 if (pud)
24492- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24493+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24494 else {
24495 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24496 reset_early_page_tables();
24497@@ -98,7 +98,7 @@ again:
24498 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24499 for (i = 0; i < PTRS_PER_PMD; i++)
24500 pmd_p[i] = 0;
24501- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24502+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24503 }
24504 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24505 pmd_p[pmd_index(address)] = pmd;
24506@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24507 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24508 early_printk("Kernel alive\n");
24509
24510- clear_page(init_level4_pgt);
24511 /* set init_level4_pgt kernel high mapping*/
24512 init_level4_pgt[511] = early_level4_pgt[511];
24513
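
The head64.c hunks replace open-coded physical-to-virtual arithmetic with __early_va(). At this point in boot only the __START_KERNEL_map alias of the kernel image is mapped, so the early helper is presumably just that offset made explicit (hedged; the definition is added elsewhere in this patch):

/* plausible definition: usable before the direct map exists, with
 * phys_base correcting for a relocated kernel */
#define __early_va(x) \
	((void *)((unsigned long)(x) + __START_KERNEL_map - phys_base))

The stores can use plain __pa() because on x86-64 __phys_addr() already handles addresses in the __START_KERNEL_map range.
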
24514diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24515index f36bd42..0ab4474 100644
24516--- a/arch/x86/kernel/head_32.S
24517+++ b/arch/x86/kernel/head_32.S
24518@@ -26,6 +26,12 @@
24519 /* Physical address */
24520 #define pa(X) ((X) - __PAGE_OFFSET)
24521
24522+#ifdef CONFIG_PAX_KERNEXEC
24523+#define ta(X) (X)
24524+#else
24525+#define ta(X) ((X) - __PAGE_OFFSET)
24526+#endif
24527+
24528 /*
24529 * References to members of the new_cpu_data structure.
24530 */
24531@@ -55,11 +61,7 @@
24532 * and small than max_low_pfn, otherwise will waste some page table entries
24533 */
24534
24535-#if PTRS_PER_PMD > 1
24536-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24537-#else
24538-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24539-#endif
24540+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24541
24542 /* Number of possible pages in the lowmem region */
24543 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24544@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24545 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24546
24547 /*
24548+ * Real beginning of normal "text" segment
24549+ */
24550+ENTRY(stext)
24551+ENTRY(_stext)
24552+
24553+/*
24554 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24555 * %esi points to the real-mode code as a 32-bit pointer.
24556 * CS and DS must be 4 GB flat segments, but we don't depend on
24557@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24558 * can.
24559 */
24560 __HEAD
24561+
24562+#ifdef CONFIG_PAX_KERNEXEC
24563+ jmp startup_32
24564+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24565+.fill PAGE_SIZE-5,1,0xcc
24566+#endif
24567+
24568 ENTRY(startup_32)
24569 movl pa(stack_start),%ecx
24570
24571@@ -106,6 +121,59 @@ ENTRY(startup_32)
24572 2:
24573 leal -__PAGE_OFFSET(%ecx),%esp
24574
24575+#ifdef CONFIG_SMP
24576+ movl $pa(cpu_gdt_table),%edi
24577+ movl $__per_cpu_load,%eax
24578+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24579+ rorl $16,%eax
24580+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24581+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24582+ movl $__per_cpu_end - 1,%eax
24583+ subl $__per_cpu_start,%eax
24584+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24585+#endif
24586+
24587+#ifdef CONFIG_PAX_MEMORY_UDEREF
24588+ movl $NR_CPUS,%ecx
24589+ movl $pa(cpu_gdt_table),%edi
24590+1:
24591+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24592+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24593+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24594+ addl $PAGE_SIZE_asm,%edi
24595+ loop 1b
24596+#endif
24597+
24598+#ifdef CONFIG_PAX_KERNEXEC
24599+ movl $pa(boot_gdt),%edi
24600+ movl $__LOAD_PHYSICAL_ADDR,%eax
24601+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24602+ rorl $16,%eax
24603+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24604+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24605+ rorl $16,%eax
24606+
24607+ ljmp $(__BOOT_CS),$1f
24608+1:
24609+
24610+ movl $NR_CPUS,%ecx
24611+ movl $pa(cpu_gdt_table),%edi
24612+ addl $__PAGE_OFFSET,%eax
24613+1:
24614+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24615+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24616+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24617+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24618+ rorl $16,%eax
24619+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24620+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24621+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24622+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24623+ rorl $16,%eax
24624+ addl $PAGE_SIZE_asm,%edi
24625+ loop 1b
24626+#endif
24627+
24628 /*
24629 * Clear BSS first so that there are no surprises...
24630 */
24631@@ -201,8 +269,11 @@ ENTRY(startup_32)
24632 movl %eax, pa(max_pfn_mapped)
24633
24634 /* Do early initialization of the fixmap area */
24635- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24636- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24637+#ifdef CONFIG_COMPAT_VDSO
24638+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24639+#else
24640+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24641+#endif
24642 #else /* Not PAE */
24643
24644 page_pde_offset = (__PAGE_OFFSET >> 20);
24645@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24646 movl %eax, pa(max_pfn_mapped)
24647
24648 /* Do early initialization of the fixmap area */
24649- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24650- movl %eax,pa(initial_page_table+0xffc)
24651+#ifdef CONFIG_COMPAT_VDSO
24652+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24653+#else
24654+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24655+#endif
24656 #endif
24657
24658 #ifdef CONFIG_PARAVIRT
24659@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24660 cmpl $num_subarch_entries, %eax
24661 jae bad_subarch
24662
24663- movl pa(subarch_entries)(,%eax,4), %eax
24664- subl $__PAGE_OFFSET, %eax
24665- jmp *%eax
24666+ jmp *pa(subarch_entries)(,%eax,4)
24667
24668 bad_subarch:
24669 WEAK(lguest_entry)
24670@@ -261,10 +333,10 @@ WEAK(xen_entry)
24671 __INITDATA
24672
24673 subarch_entries:
24674- .long default_entry /* normal x86/PC */
24675- .long lguest_entry /* lguest hypervisor */
24676- .long xen_entry /* Xen hypervisor */
24677- .long default_entry /* Moorestown MID */
24678+ .long ta(default_entry) /* normal x86/PC */
24679+ .long ta(lguest_entry) /* lguest hypervisor */
24680+ .long ta(xen_entry) /* Xen hypervisor */
24681+ .long ta(default_entry) /* Moorestown MID */
24682 num_subarch_entries = (. - subarch_entries) / 4
24683 .previous
24684 #else
24685@@ -354,6 +426,7 @@ default_entry:
24686 movl pa(mmu_cr4_features),%eax
24687 movl %eax,%cr4
24688
24689+#ifdef CONFIG_X86_PAE
24690 testb $X86_CR4_PAE, %al # check if PAE is enabled
24691 jz enable_paging
24692
24693@@ -382,6 +455,9 @@ default_entry:
24694 /* Make changes effective */
24695 wrmsr
24696
24697+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24698+#endif
24699+
24700 enable_paging:
24701
24702 /*
24703@@ -449,14 +525,20 @@ is486:
24704 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24705 movl %eax,%ss # after changing gdt.
24706
24707- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24708+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24709 movl %eax,%ds
24710 movl %eax,%es
24711
24712 movl $(__KERNEL_PERCPU), %eax
24713 movl %eax,%fs # set this cpu's percpu
24714
24715+#ifdef CONFIG_CC_STACKPROTECTOR
24716 movl $(__KERNEL_STACK_CANARY),%eax
24717+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24718+ movl $(__USER_DS),%eax
24719+#else
24720+ xorl %eax,%eax
24721+#endif
24722 movl %eax,%gs
24723
24724 xorl %eax,%eax # Clear LDT
24725@@ -512,8 +594,11 @@ setup_once:
24726 * relocation. Manually set base address in stack canary
24727 * segment descriptor.
24728 */
24729- movl $gdt_page,%eax
24730+ movl $cpu_gdt_table,%eax
24731 movl $stack_canary,%ecx
24732+#ifdef CONFIG_SMP
24733+ addl $__per_cpu_load,%ecx
24734+#endif
24735 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24736 shrl $16, %ecx
24737 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24738@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24739 cmpl $2,(%esp) # X86_TRAP_NMI
24740 je is_nmi # Ignore NMI
24741
24742- cmpl $2,%ss:early_recursion_flag
24743+ cmpl $1,%ss:early_recursion_flag
24744 je hlt_loop
24745 incl %ss:early_recursion_flag
24746
24747@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24748 pushl (20+6*4)(%esp) /* trapno */
24749 pushl $fault_msg
24750 call printk
24751-#endif
24752 call dump_stack
24753+#endif
24754 hlt_loop:
24755 hlt
24756 jmp hlt_loop
24757@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24758 /* This is the default interrupt "handler" :-) */
24759 ALIGN
24760 ignore_int:
24761- cld
24762 #ifdef CONFIG_PRINTK
24763+ cmpl $2,%ss:early_recursion_flag
24764+ je hlt_loop
24765+ incl %ss:early_recursion_flag
24766+ cld
24767 pushl %eax
24768 pushl %ecx
24769 pushl %edx
24770@@ -617,9 +705,6 @@ ignore_int:
24771 movl $(__KERNEL_DS),%eax
24772 movl %eax,%ds
24773 movl %eax,%es
24774- cmpl $2,early_recursion_flag
24775- je hlt_loop
24776- incl early_recursion_flag
24777 pushl 16(%esp)
24778 pushl 24(%esp)
24779 pushl 32(%esp)
24780@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24781 /*
24782 * BSS section
24783 */
24784-__PAGE_ALIGNED_BSS
24785- .align PAGE_SIZE
24786 #ifdef CONFIG_X86_PAE
24787+.section .initial_pg_pmd,"a",@progbits
24788 initial_pg_pmd:
24789 .fill 1024*KPMDS,4,0
24790 #else
24791+.section .initial_page_table,"a",@progbits
24792 ENTRY(initial_page_table)
24793 .fill 1024,4,0
24794 #endif
24795+.section .initial_pg_fixmap,"a",@progbits
24796 initial_pg_fixmap:
24797 .fill 1024,4,0
24798+.section .empty_zero_page,"a",@progbits
24799 ENTRY(empty_zero_page)
24800 .fill 4096,1,0
24801+.section .swapper_pg_dir,"a",@progbits
24802 ENTRY(swapper_pg_dir)
24803+#ifdef CONFIG_X86_PAE
24804+ .fill 4,8,0
24805+#else
24806 .fill 1024,4,0
24807+#endif
24808
24809 /*
24810 * This starts the data section.
24811 */
24812 #ifdef CONFIG_X86_PAE
24813-__PAGE_ALIGNED_DATA
24814- /* Page-aligned for the benefit of paravirt? */
24815- .align PAGE_SIZE
24816+.section .initial_page_table,"a",@progbits
24817 ENTRY(initial_page_table)
24818 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24819 # if KPMDS == 3
24820@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24821 # error "Kernel PMDs should be 1, 2 or 3"
24822 # endif
24823 .align PAGE_SIZE /* needs to be page-sized too */
24824+
24825+#ifdef CONFIG_PAX_PER_CPU_PGD
24826+ENTRY(cpu_pgd)
24827+ .rept 2*NR_CPUS
24828+ .fill 4,8,0
24829+ .endr
24830+#endif
24831+
24832 #endif
24833
24834 .data
24835 .balign 4
24836 ENTRY(stack_start)
24837- .long init_thread_union+THREAD_SIZE
24838+ .long init_thread_union+THREAD_SIZE-8
24839
24840 __INITRODATA
24841 int_msg:
24842@@ -727,7 +825,7 @@ fault_msg:
24843 * segment size, and 32-bit linear address value:
24844 */
24845
24846- .data
24847+.section .rodata,"a",@progbits
24848 .globl boot_gdt_descr
24849 .globl idt_descr
24850
24851@@ -736,7 +834,7 @@ fault_msg:
24852 .word 0 # 32 bit align gdt_desc.address
24853 boot_gdt_descr:
24854 .word __BOOT_DS+7
24855- .long boot_gdt - __PAGE_OFFSET
24856+ .long pa(boot_gdt)
24857
24858 .word 0 # 32-bit align idt_desc.address
24859 idt_descr:
24860@@ -747,7 +845,7 @@ idt_descr:
24861 .word 0 # 32 bit align gdt_desc.address
24862 ENTRY(early_gdt_descr)
24863 .word GDT_ENTRIES*8-1
24864- .long gdt_page /* Overwritten for secondary CPUs */
24865+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24866
24867 /*
24868 * The boot_gdt must mirror the equivalent in setup.S and is
24869@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24870 .align L1_CACHE_BYTES
24871 ENTRY(boot_gdt)
24872 .fill GDT_ENTRY_BOOT_CS,8,0
24873- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24874- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24875+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24876+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24877+
24878+ .align PAGE_SIZE_asm
24879+ENTRY(cpu_gdt_table)
24880+ .rept NR_CPUS
24881+ .quad 0x0000000000000000 /* NULL descriptor */
24882+ .quad 0x0000000000000000 /* 0x0b reserved */
24883+ .quad 0x0000000000000000 /* 0x13 reserved */
24884+ .quad 0x0000000000000000 /* 0x1b reserved */
24885+
24886+#ifdef CONFIG_PAX_KERNEXEC
24887+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24888+#else
24889+ .quad 0x0000000000000000 /* 0x20 unused */
24890+#endif
24891+
24892+ .quad 0x0000000000000000 /* 0x28 unused */
24893+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24894+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24895+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24896+ .quad 0x0000000000000000 /* 0x4b reserved */
24897+ .quad 0x0000000000000000 /* 0x53 reserved */
24898+ .quad 0x0000000000000000 /* 0x5b reserved */
24899+
24900+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24901+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24902+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24903+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24904+
24905+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24906+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24907+
24908+ /*
24909+ * Segments used for calling PnP BIOS have byte granularity.
24910+ * The code segments and data segments have fixed 64k limits,
24911+ * the transfer segment sizes are set at run time.
24912+ */
24913+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24914+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24915+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24916+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24917+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24918+
24919+ /*
24920+ * The APM segments have byte granularity and their bases
24921+ * are set at run time. All have 64k limits.
24922+ */
24923+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24924+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24925+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24926+
24927+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24928+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24929+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24930+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24931+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24932+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24933+
24934+ /* Be sure this is zeroed to avoid false validations in Xen */
24935+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24936+ .endr
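
The boot-time GDT surgery added to head_32.S above pokes individual descriptor bytes (movw at offset 2, then rorl $16 and movb at offsets 4 and 7) to install the per-CPU segment base, the stack-canary base, and the KERNEXEC code-segment bases before any C code runs. The offsets follow the classic 8-byte segment-descriptor layout. A small self-contained C rendering of the same packing:

#include <stdint.h>

/* Pack a 32-bit base into an 8-byte segment descriptor:
 * base[15:0] -> bytes 2-3, base[23:16] -> byte 4, base[31:24] -> byte 7. */
static void set_desc_base(uint8_t desc[8], uint32_t base)
{
	desc[2] = base & 0xff;
	desc[3] = (base >> 8) & 0xff;
	desc[4] = (base >> 16) & 0xff;
	desc[7] = (base >> 24) & 0xff;
}

/* Limit bits 15:0 live in bytes 0-1 (bits 19:16 share byte 6 with the
 * granularity flags and are left alone by the code above). */
static void set_desc_limit_low(uint8_t desc[8], uint16_t limit)
{
	desc[0] = limit & 0xff;
	desc[1] = (limit >> 8) & 0xff;
}
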
24937diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24938index a468c0a..c7dec74 100644
24939--- a/arch/x86/kernel/head_64.S
24940+++ b/arch/x86/kernel/head_64.S
24941@@ -20,6 +20,8 @@
24942 #include <asm/processor-flags.h>
24943 #include <asm/percpu.h>
24944 #include <asm/nops.h>
24945+#include <asm/cpufeature.h>
24946+#include <asm/alternative-asm.h>
24947
24948 #ifdef CONFIG_PARAVIRT
24949 #include <asm/asm-offsets.h>
24950@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24951 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24952 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24953 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24954+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24955+L3_VMALLOC_START = pud_index(VMALLOC_START)
24956+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24957+L3_VMALLOC_END = pud_index(VMALLOC_END)
24958+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24959+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24960
24961 .text
24962 __HEAD
24963@@ -89,11 +97,24 @@ startup_64:
24964 * Fixup the physical addresses in the page table
24965 */
24966 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24967+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24968+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24969+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24970+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24971+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24972
24973- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24974- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24975+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24976+#ifndef CONFIG_XEN
24977+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24978+#endif
24979+
24980+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24981+
24982+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24983+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24984
24985 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24986+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24987
24988 /*
24989 * Set up the identity mapping for the switchover. These
24990@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
24991 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24992 1:
24993
24994- /* Enable PAE mode and PGE */
24995- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24996+ /* Enable PAE mode and PSE/PGE */
24997+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24998 movq %rcx, %cr4
24999
25000 /* Setup early boot stage 4 level pagetables. */
25001@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
25002 movl $MSR_EFER, %ecx
25003 rdmsr
25004 btsl $_EFER_SCE, %eax /* Enable System Call */
25005- btl $20,%edi /* No Execute supported? */
25006+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25007 jnc 1f
25008 btsl $_EFER_NX, %eax
25009 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25010+#ifndef CONFIG_EFI
25011+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25012+#endif
25013+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25014+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25015+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25016+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25017+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25018+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25019 1: wrmsr /* Make changes effective */
25020
25021 /* Setup cr0 */
25022@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
25023 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25024 * address given in m16:64.
25025 */
25026+ pax_set_fptr_mask
25027 movq initial_code(%rip),%rax
25028 pushq $0 # fake return address to stop unwinder
25029 pushq $__KERNEL_CS # set correct cs
25030@@ -313,7 +344,7 @@ ENDPROC(start_cpu0)
25031 .quad INIT_PER_CPU_VAR(irq_stack_union)
25032
25033 GLOBAL(stack_start)
25034- .quad init_thread_union+THREAD_SIZE-8
25035+ .quad init_thread_union+THREAD_SIZE-16
25036 .word 0
25037 __FINITDATA
25038
25039@@ -391,7 +422,7 @@ ENTRY(early_idt_handler)
25040 call dump_stack
25041 #ifdef CONFIG_KALLSYMS
25042 leaq early_idt_ripmsg(%rip),%rdi
25043- movq 40(%rsp),%rsi # %rip again
25044+ movq 88(%rsp),%rsi # %rip again
25045 call __print_symbol
25046 #endif
25047 #endif /* EARLY_PRINTK */
25048@@ -420,6 +451,7 @@ ENDPROC(early_idt_handler)
25049 early_recursion_flag:
25050 .long 0
25051
25052+ .section .rodata,"a",@progbits
25053 #ifdef CONFIG_EARLY_PRINTK
25054 early_idt_msg:
25055 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25056@@ -447,29 +479,52 @@ NEXT_PAGE(early_level4_pgt)
25057 NEXT_PAGE(early_dynamic_pgts)
25058 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25059
25060- .data
25061+ .section .rodata,"a",@progbits
25062
25063-#ifndef CONFIG_XEN
25064 NEXT_PAGE(init_level4_pgt)
25065- .fill 512,8,0
25066-#else
25067-NEXT_PAGE(init_level4_pgt)
25068- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25069 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25070 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25071+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25072+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25073+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25074+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25075+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25076+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25077 .org init_level4_pgt + L4_START_KERNEL*8, 0
25078 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25079 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25080
25081+#ifdef CONFIG_PAX_PER_CPU_PGD
25082+NEXT_PAGE(cpu_pgd)
25083+ .rept 2*NR_CPUS
25084+ .fill 512,8,0
25085+ .endr
25086+#endif
25087+
25088 NEXT_PAGE(level3_ident_pgt)
25089 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25090+#ifdef CONFIG_XEN
25091 .fill 511, 8, 0
25092+#else
25093+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25094+ .fill 510,8,0
25095+#endif
25096+
25097+NEXT_PAGE(level3_vmalloc_start_pgt)
25098+ .fill 512,8,0
25099+
25100+NEXT_PAGE(level3_vmalloc_end_pgt)
25101+ .fill 512,8,0
25102+
25103+NEXT_PAGE(level3_vmemmap_pgt)
25104+ .fill L3_VMEMMAP_START,8,0
25105+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25106+
25107 NEXT_PAGE(level2_ident_pgt)
25108- /* Since I easily can, map the first 1G.
25109+ /* Since I easily can, map the first 2G.
25110 * Don't set NX because code runs from these pages.
25111 */
25112- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25113-#endif
25114+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25115
25116 NEXT_PAGE(level3_kernel_pgt)
25117 .fill L3_START_KERNEL,8,0
25118@@ -477,6 +532,9 @@ NEXT_PAGE(level3_kernel_pgt)
25119 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25120 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25121
25122+NEXT_PAGE(level2_vmemmap_pgt)
25123+ .fill 512,8,0
25124+
25125 NEXT_PAGE(level2_kernel_pgt)
25126 /*
25127 * 512 MB kernel mapping. We spend a full page on this pagetable
25128@@ -494,28 +552,64 @@ NEXT_PAGE(level2_kernel_pgt)
25129 NEXT_PAGE(level2_fixmap_pgt)
25130 .fill 506,8,0
25131 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25132- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25133- .fill 5,8,0
25134+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25135+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25136+ .fill 4,8,0
25137
25138 NEXT_PAGE(level1_fixmap_pgt)
25139 .fill 512,8,0
25140
25141+NEXT_PAGE(level1_vsyscall_pgt)
25142+ .fill 512,8,0
25143+
25144 #undef PMDS
25145
25146- .data
25147+ .align PAGE_SIZE
25148+ENTRY(cpu_gdt_table)
25149+ .rept NR_CPUS
25150+ .quad 0x0000000000000000 /* NULL descriptor */
25151+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25152+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25153+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25154+ .quad 0x00cffb000000ffff /* __USER32_CS */
25155+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25156+ .quad 0x00affb000000ffff /* __USER_CS */
25157+
25158+#ifdef CONFIG_PAX_KERNEXEC
25159+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25160+#else
25161+ .quad 0x0 /* unused */
25162+#endif
25163+
25164+ .quad 0,0 /* TSS */
25165+ .quad 0,0 /* LDT */
25166+ .quad 0,0,0 /* three TLS descriptors */
25167+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25168+ /* asm/segment.h:GDT_ENTRIES must match this */
25169+
25170+#ifdef CONFIG_PAX_MEMORY_UDEREF
25171+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25172+#else
25173+ .quad 0x0 /* unused */
25174+#endif
25175+
25176+ /* zero the remaining page */
25177+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25178+ .endr
25179+
25180 .align 16
25181 .globl early_gdt_descr
25182 early_gdt_descr:
25183 .word GDT_ENTRIES*8-1
25184 early_gdt_descr_base:
25185- .quad INIT_PER_CPU_VAR(gdt_page)
25186+ .quad cpu_gdt_table
25187
25188 ENTRY(phys_base)
25189 /* This must match the first entry in level2_kernel_pgt */
25190 .quad 0x0000000000000000
25191
25192 #include "../../x86/xen/xen-head.S"
25193-
25194- __PAGE_ALIGNED_BSS
25195+
25196+ .section .rodata,"a",@progbits
25197 NEXT_PAGE(empty_zero_page)
25198 .skip PAGE_SIZE
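
Once secondary_startup_64 above confirms NX support, it sets bit 63 (_PAGE_BIT_NX) directly on the top-level entries covering the page-offset, vmalloc and vmemmap regions, and mirrors the bit into __supported_pte_mask. NX in an upper-level entry applies to the whole range beneath it, so poisoning the PGD slots is enough to make those regions non-executable. A one-line C equivalent of each btsq (illustrative):

#include <stdint.h>

#define _PAGE_BIT_NX 63

static inline void pgd_set_nx(uint64_t *pgd_entry)
{
	*pgd_entry |= 1ULL << _PAGE_BIT_NX;	/* whole range becomes no-execute */
}
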
25199diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25200index 05fd74f..c3548b1 100644
25201--- a/arch/x86/kernel/i386_ksyms_32.c
25202+++ b/arch/x86/kernel/i386_ksyms_32.c
25203@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25204 EXPORT_SYMBOL(cmpxchg8b_emu);
25205 #endif
25206
25207+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25208+
25209 /* Networking helper routines. */
25210 EXPORT_SYMBOL(csum_partial_copy_generic);
25211+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25212+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25213
25214 EXPORT_SYMBOL(__get_user_1);
25215 EXPORT_SYMBOL(__get_user_2);
25216@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25217 EXPORT_SYMBOL(___preempt_schedule_context);
25218 #endif
25219 #endif
25220+
25221+#ifdef CONFIG_PAX_KERNEXEC
25222+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25223+#endif
25224+
25225+#ifdef CONFIG_PAX_PER_CPU_PGD
25226+EXPORT_SYMBOL(cpu_pgd);
25227+#endif
25228diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25229index d5dd808..b6432cf 100644
25230--- a/arch/x86/kernel/i387.c
25231+++ b/arch/x86/kernel/i387.c
25232@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25233 static inline bool interrupted_user_mode(void)
25234 {
25235 struct pt_regs *regs = get_irq_regs();
25236- return regs && user_mode_vm(regs);
25237+ return regs && user_mode(regs);
25238 }
25239
25240 /*
25241diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25242index 8af8171..f8c1169 100644
25243--- a/arch/x86/kernel/i8259.c
25244+++ b/arch/x86/kernel/i8259.c
25245@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25246 static void make_8259A_irq(unsigned int irq)
25247 {
25248 disable_irq_nosync(irq);
25249- io_apic_irqs &= ~(1<<irq);
25250+ io_apic_irqs &= ~(1UL<<irq);
25251 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25252 i8259A_chip.name);
25253 enable_irq(irq);
25254@@ -209,7 +209,7 @@ spurious_8259A_irq:
25255 "spurious 8259A interrupt: IRQ%d.\n", irq);
25256 spurious_irq_mask |= irqmask;
25257 }
25258- atomic_inc(&irq_err_count);
25259+ atomic_inc_unchecked(&irq_err_count);
25260 /*
25261 * Theoretically we do not have to handle this IRQ,
25262 * but in Linux this does not cause problems and is
25263@@ -350,14 +350,16 @@ static void init_8259A(int auto_eoi)
25264 /* (slave's support for AEOI in flat mode is to be investigated) */
25265 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25266
25267+ pax_open_kernel();
25268 if (auto_eoi)
25269 /*
25270 * In AEOI mode we just have to mask the interrupt
25271 * when acking.
25272 */
25273- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25274+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25275 else
25276- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25277+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25278+ pax_close_kernel();
25279
25280 udelay(100); /* wait for 8259A to initialize */
25281
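The pax_open_kernel()/pax_close_kernel() pair added above brackets a write to a structure the patch keeps in read-only memory (i8259A_chip here); the *(void **)& cast bypasses the const-ified member. A rough user-space analogy with mprotect(2), assuming a 4096-byte page, illustrative only:

    #include <string.h>
    #include <sys/mman.h>

    static char *ro;

    static void open_window(void)  { mprotect(ro, 4096, PROT_READ | PROT_WRITE); }
    static void close_window(void) { mprotect(ro, 4096, PROT_READ); }

    int main(void)
    {
        ro = mmap(0, 4096, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        strcpy(ro, "nop");
        mprotect(ro, 4096, PROT_READ);    /* data is now "read-only" */

        open_window();                    /* ~ pax_open_kernel() */
        strcpy(ro, "ack");                /* the one permitted write */
        close_window();                   /* ~ pax_close_kernel() */
        return 0;
    }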
25282diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25283index a979b5b..1d6db75 100644
25284--- a/arch/x86/kernel/io_delay.c
25285+++ b/arch/x86/kernel/io_delay.c
25286@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25287 * Quirk table for systems that misbehave (lock up, etc.) if port
25288 * 0x80 is used:
25289 */
25290-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25291+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25292 {
25293 .callback = dmi_io_delay_0xed_port,
25294 .ident = "Compaq Presario V6000",
25295diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25296index 4ddaf66..49d5c18 100644
25297--- a/arch/x86/kernel/ioport.c
25298+++ b/arch/x86/kernel/ioport.c
25299@@ -6,6 +6,7 @@
25300 #include <linux/sched.h>
25301 #include <linux/kernel.h>
25302 #include <linux/capability.h>
25303+#include <linux/security.h>
25304 #include <linux/errno.h>
25305 #include <linux/types.h>
25306 #include <linux/ioport.h>
25307@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25308 return -EINVAL;
25309 if (turn_on && !capable(CAP_SYS_RAWIO))
25310 return -EPERM;
25311+#ifdef CONFIG_GRKERNSEC_IO
25312+ if (turn_on && grsec_disable_privio) {
25313+ gr_handle_ioperm();
25314+ return -ENODEV;
25315+ }
25316+#endif
25317
25318 /*
25319 * If it's the first ioperm() call in this thread's lifetime, set the
25320@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25321 * because the ->io_bitmap_max value must match the bitmap
25322 * contents:
25323 */
25324- tss = &per_cpu(init_tss, get_cpu());
25325+ tss = init_tss + get_cpu();
25326
25327 if (turn_on)
25328 bitmap_clear(t->io_bitmap_ptr, from, num);
25329@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25330 if (level > old) {
25331 if (!capable(CAP_SYS_RAWIO))
25332 return -EPERM;
25333+#ifdef CONFIG_GRKERNSEC_IO
25334+ if (grsec_disable_privio) {
25335+ gr_handle_iopl();
25336+ return -ENODEV;
25337+ }
25338+#endif
25339 }
25340 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25341 t->iopl = level << 12;
25342diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25343index 922d285..6d20692 100644
25344--- a/arch/x86/kernel/irq.c
25345+++ b/arch/x86/kernel/irq.c
25346@@ -22,7 +22,7 @@
25347 #define CREATE_TRACE_POINTS
25348 #include <asm/trace/irq_vectors.h>
25349
25350-atomic_t irq_err_count;
25351+atomic_unchecked_t irq_err_count;
25352
25353 /* Function pointer for generic interrupt vector handling */
25354 void (*x86_platform_ipi_callback)(void) = NULL;
25355@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25356 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25357 seq_printf(p, " Hypervisor callback interrupts\n");
25358 #endif
25359- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25360+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25361 #if defined(CONFIG_X86_IO_APIC)
25362- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25363+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25364 #endif
25365 return 0;
25366 }
25367@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25368
25369 u64 arch_irq_stat(void)
25370 {
25371- u64 sum = atomic_read(&irq_err_count);
25372+ u64 sum = atomic_read_unchecked(&irq_err_count);
25373 return sum;
25374 }
25375
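irq_err_count above is switched from atomic_t to atomic_unchecked_t: under the PaX overflow instrumentation atomic_t traps on overflow, and pure statistics that may legitimately wrap opt out via the _unchecked variants. A compact sketch of the distinction (illustrative only, using GCC builtins):

    #include <stdio.h>

    static int add_checked(int *v, int i)
    {
        int r;
        if (__builtin_add_overflow(*v, i, &r))
            __builtin_trap();            /* overflow treated as a bug */
        return *v = r;
    }

    static int add_unchecked(int *v, int i)
    {
        unsigned int r = (unsigned int)*v + (unsigned int)i;  /* may wrap */
        return *v = (int)r;
    }

    int main(void)
    {
        int err_count = 2147483646;
        printf("%d\n", add_unchecked(&err_count, 3));  /* wraps, by design */
        (void)add_checked;
        return 0;
    }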
25376diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25377index 63ce838..2ea3e06 100644
25378--- a/arch/x86/kernel/irq_32.c
25379+++ b/arch/x86/kernel/irq_32.c
25380@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25381
25382 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25383
25384+extern void gr_handle_kernel_exploit(void);
25385+
25386 int sysctl_panic_on_stackoverflow __read_mostly;
25387
25388 /* Debugging check for stack overflow: is there less than 1KB free? */
25389@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25390 __asm__ __volatile__("andl %%esp,%0" :
25391 "=r" (sp) : "0" (THREAD_SIZE - 1));
25392
25393- return sp < (sizeof(struct thread_info) + STACK_WARN);
25394+ return sp < STACK_WARN;
25395 }
25396
25397 static void print_stack_overflow(void)
25398 {
25399 printk(KERN_WARNING "low stack detected by irq handler\n");
25400 dump_stack();
25401+ gr_handle_kernel_exploit();
25402 if (sysctl_panic_on_stackoverflow)
25403 panic("low stack detected by irq handler - check messages\n");
25404 }
25405@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25406 static inline int
25407 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25408 {
25409- struct irq_stack *curstk, *irqstk;
25410+ struct irq_stack *irqstk;
25411 u32 *isp, *prev_esp, arg1, arg2;
25412
25413- curstk = (struct irq_stack *) current_stack();
25414 irqstk = __this_cpu_read(hardirq_stack);
25415
25416 /*
25417@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25418 * handler) we can't do that and just have to keep using the
25419 * current stack (which is the irq stack already after all)
25420 */
25421- if (unlikely(curstk == irqstk))
25422+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25423 return 0;
25424
25425- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25426+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25427
25428 /* Save the next esp at the bottom of the stack */
25429 prev_esp = (u32 *)irqstk;
25430 *prev_esp = current_stack_pointer;
25431
25432+#ifdef CONFIG_PAX_MEMORY_UDEREF
25433+ __set_fs(MAKE_MM_SEG(0));
25434+#endif
25435+
25436 if (unlikely(overflow))
25437 call_on_stack(print_stack_overflow, isp);
25438
25439@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25440 : "0" (irq), "1" (desc), "2" (isp),
25441 "D" (desc->handle_irq)
25442 : "memory", "cc", "ecx");
25443+
25444+#ifdef CONFIG_PAX_MEMORY_UDEREF
25445+ __set_fs(current_thread_info()->addr_limit);
25446+#endif
25447+
25448 return 1;
25449 }
25450
25451@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25452 */
25453 void irq_ctx_init(int cpu)
25454 {
25455- struct irq_stack *irqstk;
25456-
25457 if (per_cpu(hardirq_stack, cpu))
25458 return;
25459
25460- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25461- THREADINFO_GFP,
25462- THREAD_SIZE_ORDER));
25463- per_cpu(hardirq_stack, cpu) = irqstk;
25464-
25465- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25466- THREADINFO_GFP,
25467- THREAD_SIZE_ORDER));
25468- per_cpu(softirq_stack, cpu) = irqstk;
25469-
25470- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25471- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25472+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25473+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25474 }
25475
25476 void do_softirq_own_stack(void)
25477 {
25478- struct thread_info *curstk;
25479 struct irq_stack *irqstk;
25480 u32 *isp, *prev_esp;
25481
25482- curstk = current_stack();
25483 irqstk = __this_cpu_read(softirq_stack);
25484
25485 /* build the stack frame on the softirq stack */
25486@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25487 prev_esp = (u32 *)irqstk;
25488 *prev_esp = current_stack_pointer;
25489
25490+#ifdef CONFIG_PAX_MEMORY_UDEREF
25491+ __set_fs(MAKE_MM_SEG(0));
25492+#endif
25493+
25494 call_on_stack(__do_softirq, isp);
25495+
25496+#ifdef CONFIG_PAX_MEMORY_UDEREF
25497+ __set_fs(current_thread_info()->addr_limit);
25498+#endif
25499+
25500 }
25501
25502 bool handle_irq(unsigned irq, struct pt_regs *regs)
25503@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25504 if (unlikely(!desc))
25505 return false;
25506
25507- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25508+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25509 if (unlikely(overflow))
25510 print_stack_overflow();
25511 desc->handle_irq(irq, desc);
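The CONFIG_PAX_MEMORY_UDEREF hunks above clamp the address limit to 0 while the handler runs on the IRQ stack, so a stray dereference of a userland pointer faults, and then restore the interrupted task's limit. A schematic of the save/clamp/restore pattern with stand-in types (not the real kernel API):

    typedef struct { unsigned long seg; } mm_segment_t;   /* stand-in type */

    static mm_segment_t addr_limit = { ~0UL };            /* stand-in state */

    static void handler(void) { /* IRQ work runs here */ }

    static void irq_on_own_stack(void (*fn)(void))
    {
        mm_segment_t saved = addr_limit;
        addr_limit = (mm_segment_t){ 0 };   /* ~ __set_fs(MAKE_MM_SEG(0)) */
        fn();
        addr_limit = saved;                 /* ~ __set_fs(ti->addr_limit) */
    }

    int main(void) { irq_on_own_stack(handler); return 0; }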
25512diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25513index 4d1c746..55a22d6 100644
25514--- a/arch/x86/kernel/irq_64.c
25515+++ b/arch/x86/kernel/irq_64.c
25516@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25517 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25518 EXPORT_PER_CPU_SYMBOL(irq_regs);
25519
25520+extern void gr_handle_kernel_exploit(void);
25521+
25522 int sysctl_panic_on_stackoverflow;
25523
25524 /*
25525@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25526 u64 estack_top, estack_bottom;
25527 u64 curbase = (u64)task_stack_page(current);
25528
25529- if (user_mode_vm(regs))
25530+ if (user_mode(regs))
25531 return;
25532
25533 if (regs->sp >= curbase + sizeof(struct thread_info) +
25534@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25535 irq_stack_top, irq_stack_bottom,
25536 estack_top, estack_bottom);
25537
25538+ gr_handle_kernel_exploit();
25539+
25540 if (sysctl_panic_on_stackoverflow)
25541 panic("low stack detected by irq handler - check messages\n");
25542 #endif
25543diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25544index 26d5a55..a01160a 100644
25545--- a/arch/x86/kernel/jump_label.c
25546+++ b/arch/x86/kernel/jump_label.c
25547@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25548 * Jump label is enabled for the first time.
25549 * So we expect a default_nop...
25550 */
25551- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25552+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25553 != 0))
25554 bug_at((void *)entry->code, __LINE__);
25555 } else {
25556@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25557 * ...otherwise expect an ideal_nop. Otherwise
25558 * something went horribly wrong.
25559 */
25560- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25561+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25562 != 0))
25563 bug_at((void *)entry->code, __LINE__);
25564 }
25565@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25566 * are converting the default nop to the ideal nop.
25567 */
25568 if (init) {
25569- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25570+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25571 bug_at((void *)entry->code, __LINE__);
25572 } else {
25573 code.jump = 0xe9;
25574 code.offset = entry->target -
25575 (entry->code + JUMP_LABEL_NOP_SIZE);
25576- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25577+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25578 bug_at((void *)entry->code, __LINE__);
25579 }
25580 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
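ktla_ktva() above (and its inverse ktva_ktla()) converts between the two views KERNEXEC keeps of kernel text on i386: the executable mapping and an alias at a fixed offset used when code bytes must be read or compared. The translation itself is plain offset arithmetic; a sketch with a made-up offset:

    #include <stdio.h>

    #define KTEXT_OFFSET 0x01000000UL   /* made-up value, for illustration */

    static unsigned long ktla_ktva_sketch(unsigned long a) { return a + KTEXT_OFFSET; }
    static unsigned long ktva_ktla_sketch(unsigned long a) { return a - KTEXT_OFFSET; }

    int main(void)
    {
        unsigned long ip = 0xc1000000UL;   /* pretend entry->code */
        printf("%#lx\n", ktva_ktla_sketch(ktla_ktva_sketch(ip)));  /* round-trips */
        return 0;
    }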
25581diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25582index 7ec1d5f..5a7d130 100644
25583--- a/arch/x86/kernel/kgdb.c
25584+++ b/arch/x86/kernel/kgdb.c
25585@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25586 #ifdef CONFIG_X86_32
25587 switch (regno) {
25588 case GDB_SS:
25589- if (!user_mode_vm(regs))
25590+ if (!user_mode(regs))
25591 *(unsigned long *)mem = __KERNEL_DS;
25592 break;
25593 case GDB_SP:
25594- if (!user_mode_vm(regs))
25595+ if (!user_mode(regs))
25596 *(unsigned long *)mem = kernel_stack_pointer(regs);
25597 break;
25598 case GDB_GS:
25599@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25600 bp->attr.bp_addr = breakinfo[breakno].addr;
25601 bp->attr.bp_len = breakinfo[breakno].len;
25602 bp->attr.bp_type = breakinfo[breakno].type;
25603- info->address = breakinfo[breakno].addr;
25604+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25605+ info->address = ktla_ktva(breakinfo[breakno].addr);
25606+ else
25607+ info->address = breakinfo[breakno].addr;
25608 info->len = breakinfo[breakno].len;
25609 info->type = breakinfo[breakno].type;
25610 val = arch_install_hw_breakpoint(bp);
25611@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25612 case 'k':
25613 /* clear the trace bit */
25614 linux_regs->flags &= ~X86_EFLAGS_TF;
25615- atomic_set(&kgdb_cpu_doing_single_step, -1);
25616+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25617
25618 /* set the trace bit if we're stepping */
25619 if (remcomInBuffer[0] == 's') {
25620 linux_regs->flags |= X86_EFLAGS_TF;
25621- atomic_set(&kgdb_cpu_doing_single_step,
25622+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25623 raw_smp_processor_id());
25624 }
25625
25626@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25627
25628 switch (cmd) {
25629 case DIE_DEBUG:
25630- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25631+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25632 if (user_mode(regs))
25633 return single_step_cont(regs, args);
25634 break;
25635@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25636 #endif /* CONFIG_DEBUG_RODATA */
25637
25638 bpt->type = BP_BREAKPOINT;
25639- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25640+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25641 BREAK_INSTR_SIZE);
25642 if (err)
25643 return err;
25644- err = probe_kernel_write((char *)bpt->bpt_addr,
25645+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25646 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25647 #ifdef CONFIG_DEBUG_RODATA
25648 if (!err)
25649@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25650 return -EBUSY;
25651 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25652 BREAK_INSTR_SIZE);
25653- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25654+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25655 if (err)
25656 return err;
25657 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25658@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25659 if (mutex_is_locked(&text_mutex))
25660 goto knl_write;
25661 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25662- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25663+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25664 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25665 goto knl_write;
25666 return err;
25667 knl_write:
25668 #endif /* CONFIG_DEBUG_RODATA */
25669- return probe_kernel_write((char *)bpt->bpt_addr,
25670+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25671 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25672 }
25673
25674diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25675index 67e6d19..731ed28 100644
25676--- a/arch/x86/kernel/kprobes/core.c
25677+++ b/arch/x86/kernel/kprobes/core.c
25678@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25679 s32 raddr;
25680 } __packed *insn;
25681
25682- insn = (struct __arch_relative_insn *)from;
25683+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25684+
25685+ pax_open_kernel();
25686 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25687 insn->op = op;
25688+ pax_close_kernel();
25689 }
25690
25691 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25692@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25693 kprobe_opcode_t opcode;
25694 kprobe_opcode_t *orig_opcodes = opcodes;
25695
25696- if (search_exception_tables((unsigned long)opcodes))
25697+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25698 return 0; /* Page fault may occur on this address. */
25699
25700 retry:
25701@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25702 * for the first byte, we can recover the original instruction
25703 * from it and kp->opcode.
25704 */
25705- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25706+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25707 buf[0] = kp->opcode;
25708- return (unsigned long)buf;
25709+ return ktva_ktla((unsigned long)buf);
25710 }
25711
25712 /*
25713@@ -336,7 +339,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25714 /* Another subsystem puts a breakpoint, failed to recover */
25715 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25716 return 0;
25717+ pax_open_kernel();
25718 memcpy(dest, insn.kaddr, insn.length);
25719+ pax_close_kernel();
25720
25721 #ifdef CONFIG_X86_64
25722 if (insn_rip_relative(&insn)) {
25723@@ -363,7 +368,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25724 return 0;
25725 }
25726 disp = (u8 *) dest + insn_offset_displacement(&insn);
25727+ pax_open_kernel();
25728 *(s32 *) disp = (s32) newdisp;
25729+ pax_close_kernel();
25730 }
25731 #endif
25732 return insn.length;
25733@@ -505,7 +512,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25734 * nor set current_kprobe, because it doesn't use single
25735 * stepping.
25736 */
25737- regs->ip = (unsigned long)p->ainsn.insn;
25738+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25739 preempt_enable_no_resched();
25740 return;
25741 }
25742@@ -522,9 +529,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25743 regs->flags &= ~X86_EFLAGS_IF;
25744 /* single step inline if the instruction is an int3 */
25745 if (p->opcode == BREAKPOINT_INSTRUCTION)
25746- regs->ip = (unsigned long)p->addr;
25747+ regs->ip = ktla_ktva((unsigned long)p->addr);
25748 else
25749- regs->ip = (unsigned long)p->ainsn.insn;
25750+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25751 }
25752 NOKPROBE_SYMBOL(setup_singlestep);
25753
25754@@ -574,7 +581,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25755 struct kprobe *p;
25756 struct kprobe_ctlblk *kcb;
25757
25758- if (user_mode_vm(regs))
25759+ if (user_mode(regs))
25760 return 0;
25761
25762 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25763@@ -609,7 +616,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25764 setup_singlestep(p, regs, kcb, 0);
25765 return 1;
25766 }
25767- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25768+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25769 /*
25770 * The breakpoint instruction was removed right
25771 * after we hit it. Another cpu has removed
25772@@ -656,6 +663,9 @@ static void __used kretprobe_trampoline_holder(void)
25773 " movq %rax, 152(%rsp)\n"
25774 RESTORE_REGS_STRING
25775 " popfq\n"
25776+#ifdef KERNEXEC_PLUGIN
25777+ " btsq $63,(%rsp)\n"
25778+#endif
25779 #else
25780 " pushf\n"
25781 SAVE_REGS_STRING
25782@@ -796,7 +806,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25783 struct kprobe_ctlblk *kcb)
25784 {
25785 unsigned long *tos = stack_addr(regs);
25786- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25787+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25788 unsigned long orig_ip = (unsigned long)p->addr;
25789 kprobe_opcode_t *insn = p->ainsn.insn;
25790
25791@@ -979,7 +989,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25792 struct die_args *args = data;
25793 int ret = NOTIFY_DONE;
25794
25795- if (args->regs && user_mode_vm(args->regs))
25796+ if (args->regs && user_mode(args->regs))
25797 return ret;
25798
25799 if (val == DIE_GPF) {
25800diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25801index f304773..551e63c 100644
25802--- a/arch/x86/kernel/kprobes/opt.c
25803+++ b/arch/x86/kernel/kprobes/opt.c
25804@@ -79,6 +79,7 @@ found:
25805 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25806 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25807 {
25808+ pax_open_kernel();
25809 #ifdef CONFIG_X86_64
25810 *addr++ = 0x48;
25811 *addr++ = 0xbf;
25812@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25813 *addr++ = 0xb8;
25814 #endif
25815 *(unsigned long *)addr = val;
25816+ pax_close_kernel();
25817 }
25818
25819 asm (
25820@@ -337,7 +339,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25821 * Verify if the address gap is in 2GB range, because this uses
25822 * a relative jump.
25823 */
25824- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25825+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25826 if (abs(rel) > 0x7fffffff)
25827 return -ERANGE;
25828
25829@@ -352,16 +354,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25830 op->optinsn.size = ret;
25831
25832 /* Copy arch-dep-instance from template */
25833- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25834+ pax_open_kernel();
25835+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25836+ pax_close_kernel();
25837
25838 /* Set probe information */
25839 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25840
25841 /* Set probe function call */
25842- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25843+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25844
25845 /* Set returning jmp instruction at the tail of out-of-line buffer */
25846- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25847+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25848 (u8 *)op->kp.addr + op->optinsn.size);
25849
25850 flush_icache_range((unsigned long) buf,
25851@@ -386,7 +390,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25852 WARN_ON(kprobe_disabled(&op->kp));
25853
25854 /* Backup instructions which will be replaced by jump address */
25855- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25856+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25857 RELATIVE_ADDR_SIZE);
25858
25859 insn_buf[0] = RELATIVEJUMP_OPCODE;
25860@@ -434,7 +438,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25861 /* This kprobe is really able to run optimized path. */
25862 op = container_of(p, struct optimized_kprobe, kp);
25863 /* Detour through copied instructions */
25864- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25865+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25866 if (!reenter)
25867 reset_current_kprobe();
25868 preempt_enable_no_resched();
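The rel range check above verifies the out-of-line buffer is reachable from the (ktla_ktva-translated) probe address with a rel32 displacement. As a general rule, a 5-byte jmp reaches targets whose displacement from the end of the instruction fits in a signed 32 bits; worked example with pretend addresses, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t src = 0xffffffff81000000LL;  /* pretend op->kp.addr */
        int64_t dst = 0xffffffffa0200000LL;  /* pretend op->optinsn.insn */
        int64_t rel = dst - (src + 5);       /* displacement after the jmp */

        printf("rel=%lld -> %s\n", (long long)rel,
               (rel >= INT32_MIN && rel <= INT32_MAX) ? "fits" : "out of range");
        return 0;
    }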
25869diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25870index c2bedae..25e7ab60 100644
25871--- a/arch/x86/kernel/ksysfs.c
25872+++ b/arch/x86/kernel/ksysfs.c
25873@@ -184,7 +184,7 @@ out:
25874
25875 static struct kobj_attribute type_attr = __ATTR_RO(type);
25876
25877-static struct bin_attribute data_attr = {
25878+static bin_attribute_no_const data_attr __read_only = {
25879 .attr = {
25880 .name = "data",
25881 .mode = S_IRUGO,
25882diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25883index c37886d..d851d32 100644
25884--- a/arch/x86/kernel/ldt.c
25885+++ b/arch/x86/kernel/ldt.c
25886@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25887 if (reload) {
25888 #ifdef CONFIG_SMP
25889 preempt_disable();
25890- load_LDT(pc);
25891+ load_LDT_nolock(pc);
25892 if (!cpumask_equal(mm_cpumask(current->mm),
25893 cpumask_of(smp_processor_id())))
25894 smp_call_function(flush_ldt, current->mm, 1);
25895 preempt_enable();
25896 #else
25897- load_LDT(pc);
25898+ load_LDT_nolock(pc);
25899 #endif
25900 }
25901 if (oldsize) {
25902@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25903 return err;
25904
25905 for (i = 0; i < old->size; i++)
25906- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25907+ write_ldt_entry(new->ldt, i, old->ldt + i);
25908 return 0;
25909 }
25910
25911@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25912 retval = copy_ldt(&mm->context, &old_mm->context);
25913 mutex_unlock(&old_mm->context.lock);
25914 }
25915+
25916+ if (tsk == current) {
25917+ mm->context.vdso = 0;
25918+
25919+#ifdef CONFIG_X86_32
25920+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25921+ mm->context.user_cs_base = 0UL;
25922+ mm->context.user_cs_limit = ~0UL;
25923+
25924+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25925+ cpus_clear(mm->context.cpu_user_cs_mask);
25926+#endif
25927+
25928+#endif
25929+#endif
25930+
25931+ }
25932+
25933 return retval;
25934 }
25935
25936@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25937 }
25938 }
25939
25940+#ifdef CONFIG_PAX_SEGMEXEC
25941+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25942+ error = -EINVAL;
25943+ goto out_unlock;
25944+ }
25945+#endif
25946+
25947 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25948 error = -EINVAL;
25949 goto out_unlock;
25950diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25951index 1667b1d..16492c5 100644
25952--- a/arch/x86/kernel/machine_kexec_32.c
25953+++ b/arch/x86/kernel/machine_kexec_32.c
25954@@ -25,7 +25,7 @@
25955 #include <asm/cacheflush.h>
25956 #include <asm/debugreg.h>
25957
25958-static void set_idt(void *newidt, __u16 limit)
25959+static void set_idt(struct desc_struct *newidt, __u16 limit)
25960 {
25961 struct desc_ptr curidt;
25962
25963@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
25964 }
25965
25966
25967-static void set_gdt(void *newgdt, __u16 limit)
25968+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25969 {
25970 struct desc_ptr curgdt;
25971
25972@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
25973 }
25974
25975 control_page = page_address(image->control_code_page);
25976- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25977+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25978
25979 relocate_kernel_ptr = control_page;
25980 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25981diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25982index c050a01..5774072 100644
25983--- a/arch/x86/kernel/mcount_64.S
25984+++ b/arch/x86/kernel/mcount_64.S
25985@@ -7,7 +7,7 @@
25986 #include <linux/linkage.h>
25987 #include <asm/ptrace.h>
25988 #include <asm/ftrace.h>
25989-
25990+#include <asm/alternative-asm.h>
25991
25992 .code64
25993 .section .entry.text, "ax"
25994@@ -24,8 +24,9 @@
25995 #ifdef CONFIG_DYNAMIC_FTRACE
25996
25997 ENTRY(function_hook)
25998+ pax_force_retaddr
25999 retq
26000-END(function_hook)
26001+ENDPROC(function_hook)
26002
26003 /* skip is set if stack has been adjusted */
26004 .macro ftrace_caller_setup skip=0
26005@@ -66,8 +67,9 @@ GLOBAL(ftrace_graph_call)
26006 #endif
26007
26008 GLOBAL(ftrace_stub)
26009+ pax_force_retaddr
26010 retq
26011-END(ftrace_caller)
26012+ENDPROC(ftrace_caller)
26013
26014 ENTRY(ftrace_regs_caller)
26015 /* Save the current flags before compare (in SS location)*/
26016@@ -135,7 +137,7 @@ ftrace_restore_flags:
26017 popfq
26018 jmp ftrace_stub
26019
26020-END(ftrace_regs_caller)
26021+ENDPROC(ftrace_regs_caller)
26022
26023
26024 #else /* ! CONFIG_DYNAMIC_FTRACE */
26025@@ -156,6 +158,7 @@ ENTRY(function_hook)
26026 #endif
26027
26028 GLOBAL(ftrace_stub)
26029+ pax_force_retaddr
26030 retq
26031
26032 trace:
26033@@ -169,12 +172,13 @@ trace:
26034 #endif
26035 subq $MCOUNT_INSN_SIZE, %rdi
26036
26037+ pax_force_fptr ftrace_trace_function
26038 call *ftrace_trace_function
26039
26040 MCOUNT_RESTORE_FRAME
26041
26042 jmp ftrace_stub
26043-END(function_hook)
26044+ENDPROC(function_hook)
26045 #endif /* CONFIG_DYNAMIC_FTRACE */
26046 #endif /* CONFIG_FUNCTION_TRACER */
26047
26048@@ -196,8 +200,9 @@ ENTRY(ftrace_graph_caller)
26049
26050 MCOUNT_RESTORE_FRAME
26051
26052+ pax_force_retaddr
26053 retq
26054-END(ftrace_graph_caller)
26055+ENDPROC(ftrace_graph_caller)
26056
26057 GLOBAL(return_to_handler)
26058 subq $24, %rsp
26059@@ -213,5 +218,7 @@ GLOBAL(return_to_handler)
26060 movq 8(%rsp), %rdx
26061 movq (%rsp), %rax
26062 addq $24, %rsp
26063+ pax_force_fptr %rdi
26064 jmp *%rdi
26065+ENDPROC(return_to_handler)
26066 #endif
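pax_force_retaddr above re-asserts the KERNEXEC return-address property before each ret: bit 63 of the saved return address is set so it stays in the kernel half of the canonical address space and a corrupted return cannot land in userland. The bit operation itself, sketched in C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t retaddr = 0x00007f0012345678ULL;  /* a user-range address */
        retaddr |= 1ULL << 63;                     /* ~ btsq $63,(%rsp) */
        printf("%#llx\n", (unsigned long long)retaddr);
        return 0;
    }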
26067diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26068index e69f988..da078ea 100644
26069--- a/arch/x86/kernel/module.c
26070+++ b/arch/x86/kernel/module.c
26071@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26072 }
26073 #endif
26074
26075-void *module_alloc(unsigned long size)
26076+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26077 {
26078- if (PAGE_ALIGN(size) > MODULES_LEN)
26079+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26080 return NULL;
26081 return __vmalloc_node_range(size, 1,
26082 MODULES_VADDR + get_module_load_offset(),
26083- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26084- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26085+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26086+ prot, NUMA_NO_NODE,
26087 __builtin_return_address(0));
26088 }
26089
26090+void *module_alloc(unsigned long size)
26091+{
26092+
26093+#ifdef CONFIG_PAX_KERNEXEC
26094+ return __module_alloc(size, PAGE_KERNEL);
26095+#else
26096+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26097+#endif
26098+
26099+}
26100+
26101+#ifdef CONFIG_PAX_KERNEXEC
26102+#ifdef CONFIG_X86_32
26103+void *module_alloc_exec(unsigned long size)
26104+{
26105+ struct vm_struct *area;
26106+
26107+ if (size == 0)
26108+ return NULL;
26109+
26110+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26111+ return area ? area->addr : NULL;
26112+}
26113+EXPORT_SYMBOL(module_alloc_exec);
26114+
26115+void module_free_exec(struct module *mod, void *module_region)
26116+{
26117+ vunmap(module_region);
26118+}
26119+EXPORT_SYMBOL(module_free_exec);
26120+#else
26121+void module_free_exec(struct module *mod, void *module_region)
26122+{
26123+ module_free(mod, module_region);
26124+}
26125+EXPORT_SYMBOL(module_free_exec);
26126+
26127+void *module_alloc_exec(unsigned long size)
26128+{
26129+ return __module_alloc(size, PAGE_KERNEL_RX);
26130+}
26131+EXPORT_SYMBOL(module_alloc_exec);
26132+#endif
26133+#endif
26134+
26135 #ifdef CONFIG_X86_32
26136 int apply_relocate(Elf32_Shdr *sechdrs,
26137 const char *strtab,
26138@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26139 unsigned int i;
26140 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26141 Elf32_Sym *sym;
26142- uint32_t *location;
26143+ uint32_t *plocation, location;
26144
26145 DEBUGP("Applying relocate section %u to %u\n",
26146 relsec, sechdrs[relsec].sh_info);
26147 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26148 /* This is where to make the change */
26149- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26150- + rel[i].r_offset;
26151+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26152+ location = (uint32_t)plocation;
26153+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26154+ plocation = ktla_ktva((void *)plocation);
26155 /* This is the symbol it is referring to. Note that all
26156 undefined symbols have been resolved. */
26157 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26158@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26159 switch (ELF32_R_TYPE(rel[i].r_info)) {
26160 case R_386_32:
26161 /* We add the value into the location given */
26162- *location += sym->st_value;
26163+ pax_open_kernel();
26164+ *plocation += sym->st_value;
26165+ pax_close_kernel();
26166 break;
26167 case R_386_PC32:
26168 /* Add the value, subtract its position */
26169- *location += sym->st_value - (uint32_t)location;
26170+ pax_open_kernel();
26171+ *plocation += sym->st_value - location;
26172+ pax_close_kernel();
26173 break;
26174 default:
26175 pr_err("%s: Unknown relocation: %u\n",
26176@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26177 case R_X86_64_NONE:
26178 break;
26179 case R_X86_64_64:
26180+ pax_open_kernel();
26181 *(u64 *)loc = val;
26182+ pax_close_kernel();
26183 break;
26184 case R_X86_64_32:
26185+ pax_open_kernel();
26186 *(u32 *)loc = val;
26187+ pax_close_kernel();
26188 if (val != *(u32 *)loc)
26189 goto overflow;
26190 break;
26191 case R_X86_64_32S:
26192+ pax_open_kernel();
26193 *(s32 *)loc = val;
26194+ pax_close_kernel();
26195 if ((s64)val != *(s32 *)loc)
26196 goto overflow;
26197 break;
26198 case R_X86_64_PC32:
26199 val -= (u64)loc;
26200+ pax_open_kernel();
26201 *(u32 *)loc = val;
26202+ pax_close_kernel();
26203+
26204 #if 0
26205 if ((s64)val != *(s32 *)loc)
26206 goto overflow;
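apply_relocate() above routes the i386 relocation writes through pax_open_kernel() and the ktla_ktva alias when the target section is executable; the relocation arithmetic itself is unchanged. Worked example of the two handled kinds, on ordinary user-space memory (illustrative only; on a 64-bit host the pointer is truncated, as R_386 relocations are 32-bit):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t image[4] = {0};
        uint32_t *loc = &image[1];
        uint32_t sym = 0x1234;                    /* pretend st_value */

        *loc += sym;                              /* R_386_32:   S + A */
        printf("R_386_32   -> %#x\n", *loc);

        *loc = 0;
        *loc += sym - (uint32_t)(uintptr_t)loc;   /* R_386_PC32: S + A - P */
        printf("R_386_PC32 -> %#x\n", *loc);
        return 0;
    }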
26207diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26208index c9603ac..9f88728 100644
26209--- a/arch/x86/kernel/msr.c
26210+++ b/arch/x86/kernel/msr.c
26211@@ -37,6 +37,7 @@
26212 #include <linux/notifier.h>
26213 #include <linux/uaccess.h>
26214 #include <linux/gfp.h>
26215+#include <linux/grsecurity.h>
26216
26217 #include <asm/processor.h>
26218 #include <asm/msr.h>
26219@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26220 int err = 0;
26221 ssize_t bytes = 0;
26222
26223+#ifdef CONFIG_GRKERNSEC_KMEM
26224+ gr_handle_msr_write();
26225+ return -EPERM;
26226+#endif
26227+
26228 if (count % 8)
26229 return -EINVAL; /* Invalid chunk size */
26230
26231@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26232 err = -EBADF;
26233 break;
26234 }
26235+#ifdef CONFIG_GRKERNSEC_KMEM
26236+ gr_handle_msr_write();
26237+ return -EPERM;
26238+#endif
26239 if (copy_from_user(&regs, uregs, sizeof regs)) {
26240 err = -EFAULT;
26241 break;
26242@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26243 return notifier_from_errno(err);
26244 }
26245
26246-static struct notifier_block __refdata msr_class_cpu_notifier = {
26247+static struct notifier_block msr_class_cpu_notifier = {
26248 .notifier_call = msr_class_cpu_callback,
26249 };
26250
26251diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26252index c3e985d..110a36a 100644
26253--- a/arch/x86/kernel/nmi.c
26254+++ b/arch/x86/kernel/nmi.c
26255@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26256
26257 static void nmi_max_handler(struct irq_work *w)
26258 {
26259- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26260+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26261 int remainder_ns, decimal_msecs;
26262- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26263+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26264
26265 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26266 decimal_msecs = remainder_ns / 1000;
26267
26268 printk_ratelimited(KERN_INFO
26269 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26270- a->handler, whole_msecs, decimal_msecs);
26271+ n->action->handler, whole_msecs, decimal_msecs);
26272 }
26273
26274 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26275@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26276 delta = sched_clock() - delta;
26277 trace_nmi_handler(a->handler, (int)delta, thishandled);
26278
26279- if (delta < nmi_longest_ns || delta < a->max_duration)
26280+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26281 continue;
26282
26283- a->max_duration = delta;
26284- irq_work_queue(&a->irq_work);
26285+ a->work->max_duration = delta;
26286+ irq_work_queue(&a->work->irq_work);
26287 }
26288
26289 rcu_read_unlock();
26290@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26291 }
26292 NOKPROBE_SYMBOL(nmi_handle);
26293
26294-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26295+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26296 {
26297 struct nmi_desc *desc = nmi_to_desc(type);
26298 unsigned long flags;
26299@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26300 if (!action->handler)
26301 return -EINVAL;
26302
26303- init_irq_work(&action->irq_work, nmi_max_handler);
26304+ action->work->action = action;
26305+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26306
26307 spin_lock_irqsave(&desc->lock, flags);
26308
26309@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26310 * event confuses some handlers (kdump uses this flag)
26311 */
26312 if (action->flags & NMI_FLAG_FIRST)
26313- list_add_rcu(&action->list, &desc->head);
26314+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26315 else
26316- list_add_tail_rcu(&action->list, &desc->head);
26317+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26318
26319 spin_unlock_irqrestore(&desc->lock, flags);
26320 return 0;
26321@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26322 if (!strcmp(n->name, name)) {
26323 WARN(in_nmi(),
26324 "Trying to free NMI (%s) from NMI context!\n", n->name);
26325- list_del_rcu(&n->list);
26326+ pax_list_del_rcu((struct list_head *)&n->list);
26327 break;
26328 }
26329 }
26330@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26331 dotraplinkage notrace void
26332 do_nmi(struct pt_regs *regs, long error_code)
26333 {
26334+
26335+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26336+ if (!user_mode(regs)) {
26337+ unsigned long cs = regs->cs & 0xFFFF;
26338+ unsigned long ip = ktva_ktla(regs->ip);
26339+
26340+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26341+ regs->ip = ip;
26342+ }
26343+#endif
26344+
26345 nmi_nesting_preprocess(regs);
26346
26347 nmi_enter();
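The nmiaction/nmiwork split above moves the only mutable state (max_duration and its irq_work) out of the registration record, so the record itself can be const and is linked in via the pax_list helpers. Shape of the refactor, in miniature (names are illustrative):

    struct work_sketch { unsigned long long max_duration; };

    struct action_sketch {
        int (*handler)(void);
        struct work_sketch *work;     /* mutable side-car */
    };

    static struct work_sketch wk;
    static int h(void) { return 0; }
    static const struct action_sketch act = { .handler = h, .work = &wk };

    int main(void) { act.work->max_duration = 1; return act.handler(); }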
26348diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26349index 6d9582e..f746287 100644
26350--- a/arch/x86/kernel/nmi_selftest.c
26351+++ b/arch/x86/kernel/nmi_selftest.c
26352@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26353 {
26354 /* trap all the unknown NMIs we may generate */
26355 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26356- __initdata);
26357+ __initconst);
26358 }
26359
26360 static void __init cleanup_nmi_testsuite(void)
26361@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26362 unsigned long timeout;
26363
26364 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26365- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26366+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26367 nmi_fail = FAILURE;
26368 return;
26369 }
26370diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26371index bbb6c73..24a58ef 100644
26372--- a/arch/x86/kernel/paravirt-spinlocks.c
26373+++ b/arch/x86/kernel/paravirt-spinlocks.c
26374@@ -8,7 +8,7 @@
26375
26376 #include <asm/paravirt.h>
26377
26378-struct pv_lock_ops pv_lock_ops = {
26379+struct pv_lock_ops pv_lock_ops __read_only = {
26380 #ifdef CONFIG_SMP
26381 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26382 .unlock_kick = paravirt_nop,
26383diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26384index 548d25f..f8fb99c 100644
26385--- a/arch/x86/kernel/paravirt.c
26386+++ b/arch/x86/kernel/paravirt.c
26387@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26388 {
26389 return x;
26390 }
26391+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26392+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26393+#endif
26394
26395 void __init default_banner(void)
26396 {
26397@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26398
26399 if (opfunc == NULL)
26400 /* If there's no function, patch it with a ud2a (BUG) */
26401- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26402- else if (opfunc == _paravirt_nop)
26403+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26404+ else if (opfunc == (void *)_paravirt_nop)
26405 /* If the operation is a nop, then nop the callsite */
26406 ret = paravirt_patch_nop();
26407
26408 /* identity functions just return their single argument */
26409- else if (opfunc == _paravirt_ident_32)
26410+ else if (opfunc == (void *)_paravirt_ident_32)
26411 ret = paravirt_patch_ident_32(insnbuf, len);
26412- else if (opfunc == _paravirt_ident_64)
26413+ else if (opfunc == (void *)_paravirt_ident_64)
26414 ret = paravirt_patch_ident_64(insnbuf, len);
26415+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26416+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26417+ ret = paravirt_patch_ident_64(insnbuf, len);
26418+#endif
26419
26420 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26421 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26422@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26423 if (insn_len > len || start == NULL)
26424 insn_len = len;
26425 else
26426- memcpy(insnbuf, start, insn_len);
26427+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26428
26429 return insn_len;
26430 }
26431@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26432 return this_cpu_read(paravirt_lazy_mode);
26433 }
26434
26435-struct pv_info pv_info = {
26436+struct pv_info pv_info __read_only = {
26437 .name = "bare hardware",
26438 .paravirt_enabled = 0,
26439 .kernel_rpl = 0,
26440@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26441 #endif
26442 };
26443
26444-struct pv_init_ops pv_init_ops = {
26445+struct pv_init_ops pv_init_ops __read_only = {
26446 .patch = native_patch,
26447 };
26448
26449-struct pv_time_ops pv_time_ops = {
26450+struct pv_time_ops pv_time_ops __read_only = {
26451 .sched_clock = native_sched_clock,
26452 .steal_clock = native_steal_clock,
26453 };
26454
26455-__visible struct pv_irq_ops pv_irq_ops = {
26456+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26457 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26458 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26459 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26460@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26461 #endif
26462 };
26463
26464-__visible struct pv_cpu_ops pv_cpu_ops = {
26465+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26466 .cpuid = native_cpuid,
26467 .get_debugreg = native_get_debugreg,
26468 .set_debugreg = native_set_debugreg,
26469@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26470 NOKPROBE_SYMBOL(native_set_debugreg);
26471 NOKPROBE_SYMBOL(native_load_idt);
26472
26473-struct pv_apic_ops pv_apic_ops = {
25474+struct pv_apic_ops pv_apic_ops __read_only = {
26475 #ifdef CONFIG_X86_LOCAL_APIC
26476 .startup_ipi_hook = paravirt_nop,
26477 #endif
26478 };
26479
26480-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26481+#ifdef CONFIG_X86_32
26482+#ifdef CONFIG_X86_PAE
26483+/* 64-bit pagetable entries */
26484+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26485+#else
26486 /* 32-bit pagetable entries */
26487 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26488+#endif
26489 #else
26490 /* 64-bit pagetable entries */
26491 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26492 #endif
26493
26494-struct pv_mmu_ops pv_mmu_ops = {
26495+struct pv_mmu_ops pv_mmu_ops __read_only = {
26496
26497 .read_cr2 = native_read_cr2,
26498 .write_cr2 = native_write_cr2,
26499@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26500 .make_pud = PTE_IDENT,
26501
26502 .set_pgd = native_set_pgd,
26503+ .set_pgd_batched = native_set_pgd_batched,
26504 #endif
26505 #endif /* PAGETABLE_LEVELS >= 3 */
26506
26507@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26508 },
26509
26510 .set_fixmap = native_set_fixmap,
26511+
26512+#ifdef CONFIG_PAX_KERNEXEC
26513+ .pax_open_kernel = native_pax_open_kernel,
26514+ .pax_close_kernel = native_pax_close_kernel,
26515+#endif
26516+
26517 };
26518
26519 EXPORT_SYMBOL_GPL(pv_time_ops);
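The __read_only annotations above move the paravirt dispatch tables out of writable memory once they are fixed, removing a classic function-pointer overwrite target; the PAE branch additionally routes PTE_IDENT through a callee-save thunk of _paravirt_ident_64. The const-table idea, in miniature:

    #include <stdio.h>

    struct ops_sketch { void (*banner)(void); };

    static void native_banner(void) { puts("bare hardware"); }

    /* once fixed at boot, the table itself need never be writable */
    static const struct ops_sketch pv_ops_sketch = { .banner = native_banner };

    int main(void) { pv_ops_sketch.banner(); return 0; }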
26520diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26521index 0497f71..7186c0d 100644
26522--- a/arch/x86/kernel/pci-calgary_64.c
26523+++ b/arch/x86/kernel/pci-calgary_64.c
26524@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26525 tce_space = be64_to_cpu(readq(target));
26526 tce_space = tce_space & TAR_SW_BITS;
26527
26528- tce_space = tce_space & (~specified_table_size);
26529+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26530 info->tce_space = (u64 *)__va(tce_space);
26531 }
26532 }
26533diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26534index 35ccf75..7a15747 100644
26535--- a/arch/x86/kernel/pci-iommu_table.c
26536+++ b/arch/x86/kernel/pci-iommu_table.c
26537@@ -2,7 +2,7 @@
26538 #include <asm/iommu_table.h>
26539 #include <linux/string.h>
26540 #include <linux/kallsyms.h>
26541-
26542+#include <linux/sched.h>
26543
26544 #define DEBUG 1
26545
26546diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26547index 77dd0ad..9ec4723 100644
26548--- a/arch/x86/kernel/pci-swiotlb.c
26549+++ b/arch/x86/kernel/pci-swiotlb.c
26550@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26551 struct dma_attrs *attrs)
26552 {
26553 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26554- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26555+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26556 else
26557 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26558 }
26559diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
26560index ca7f0d5..8996469 100644
26561--- a/arch/x86/kernel/preempt.S
26562+++ b/arch/x86/kernel/preempt.S
26563@@ -3,12 +3,14 @@
26564 #include <asm/dwarf2.h>
26565 #include <asm/asm.h>
26566 #include <asm/calling.h>
26567+#include <asm/alternative-asm.h>
26568
26569 ENTRY(___preempt_schedule)
26570 CFI_STARTPROC
26571 SAVE_ALL
26572 call preempt_schedule
26573 RESTORE_ALL
26574+ pax_force_retaddr
26575 ret
26576 CFI_ENDPROC
26577
26578@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
26579 SAVE_ALL
26580 call preempt_schedule_context
26581 RESTORE_ALL
26582+ pax_force_retaddr
26583 ret
26584 CFI_ENDPROC
26585
26586diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26587index 4505e2a..ae28b0d 100644
26588--- a/arch/x86/kernel/process.c
26589+++ b/arch/x86/kernel/process.c
26590@@ -36,7 +36,8 @@
26591 * section. Since TSS's are completely CPU-local, we want them
26592 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26593 */
26594-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26595+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26596+EXPORT_SYMBOL(init_tss);
26597
26598 #ifdef CONFIG_X86_64
26599 static DEFINE_PER_CPU(unsigned char, is_idle);
26600@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
26601 task_xstate_cachep =
26602 kmem_cache_create("task_xstate", xstate_size,
26603 __alignof__(union thread_xstate),
26604- SLAB_PANIC | SLAB_NOTRACK, NULL);
26605+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26606 }
26607
26608 /*
26609@@ -105,7 +106,7 @@ void exit_thread(void)
26610 unsigned long *bp = t->io_bitmap_ptr;
26611
26612 if (bp) {
26613- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26614+ struct tss_struct *tss = init_tss + get_cpu();
26615
26616 t->io_bitmap_ptr = NULL;
26617 clear_thread_flag(TIF_IO_BITMAP);
26618@@ -125,6 +126,9 @@ void flush_thread(void)
26619 {
26620 struct task_struct *tsk = current;
26621
26622+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26623+ loadsegment(gs, 0);
26624+#endif
26625 flush_ptrace_hw_breakpoint(tsk);
26626 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26627 drop_init_fpu(tsk);
26628@@ -271,7 +275,7 @@ static void __exit_idle(void)
26629 void exit_idle(void)
26630 {
26631 /* idle loop has pid 0 */
26632- if (current->pid)
26633+ if (task_pid_nr(current))
26634 return;
26635 __exit_idle();
26636 }
26637@@ -324,7 +328,7 @@ bool xen_set_default_idle(void)
26638 return ret;
26639 }
26640 #endif
26641-void stop_this_cpu(void *dummy)
26642+__noreturn void stop_this_cpu(void *dummy)
26643 {
26644 local_irq_disable();
26645 /*
26646@@ -453,16 +457,37 @@ static int __init idle_setup(char *str)
26647 }
26648 early_param("idle", idle_setup);
26649
26650-unsigned long arch_align_stack(unsigned long sp)
26651+#ifdef CONFIG_PAX_RANDKSTACK
26652+void pax_randomize_kstack(struct pt_regs *regs)
26653 {
26654- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26655- sp -= get_random_int() % 8192;
26656- return sp & ~0xf;
26657-}
26658+ struct thread_struct *thread = &current->thread;
26659+ unsigned long time;
26660
26661-unsigned long arch_randomize_brk(struct mm_struct *mm)
26662-{
26663- unsigned long range_end = mm->brk + 0x02000000;
26664- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26665-}
26666+ if (!randomize_va_space)
26667+ return;
26668+
26669+ if (v8086_mode(regs))
26670+ return;
26671
26672+ rdtscl(time);
26673+
26674+ /* P4 seems to return a 0 LSB, ignore it */
26675+#ifdef CONFIG_MPENTIUM4
26676+ time &= 0x3EUL;
26677+ time <<= 2;
26678+#elif defined(CONFIG_X86_64)
26679+ time &= 0xFUL;
26680+ time <<= 4;
26681+#else
26682+ time &= 0x1FUL;
26683+ time <<= 3;
26684+#endif
26685+
26686+ thread->sp0 ^= time;
26687+ load_sp0(init_tss + smp_processor_id(), thread);
26688+
26689+#ifdef CONFIG_X86_64
26690+ this_cpu_write(kernel_stack, thread->sp0);
26691+#endif
26692+}
26693+#endif
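pax_randomize_kstack() above XORs a few TSC-derived bits into thread.sp0 on kernel entry, shifting the kernel stack base by a small aligned jitter (with the noted P4 zero-LSB quirk handled separately). Worked example of the CONFIG_X86_64 branch:

    #include <stdio.h>

    int main(void)
    {
        unsigned long tsc = 0x9f3b2dUL;            /* pretend rdtscl() value */
        unsigned long jitter = (tsc & 0xFUL) << 4; /* 16-byte aligned, < 256 */
        printf("sp0 ^= %#lx\n", jitter);           /* here: 0xd0 */
        return 0;
    }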
26694diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26695index 7bc86bb..0ea06e8 100644
26696--- a/arch/x86/kernel/process_32.c
26697+++ b/arch/x86/kernel/process_32.c
26698@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26699 unsigned long thread_saved_pc(struct task_struct *tsk)
26700 {
26701 return ((unsigned long *)tsk->thread.sp)[3];
26702+//XXX return tsk->thread.eip;
26703 }
26704
26705 void __show_regs(struct pt_regs *regs, int all)
26706@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26707 unsigned long sp;
26708 unsigned short ss, gs;
26709
26710- if (user_mode_vm(regs)) {
26711+ if (user_mode(regs)) {
26712 sp = regs->sp;
26713 ss = regs->ss & 0xffff;
26714- gs = get_user_gs(regs);
26715 } else {
26716 sp = kernel_stack_pointer(regs);
26717 savesegment(ss, ss);
26718- savesegment(gs, gs);
26719 }
26720+ gs = get_user_gs(regs);
26721
26722 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26723 (u16)regs->cs, regs->ip, regs->flags,
26724- smp_processor_id());
26725+ raw_smp_processor_id());
26726 print_symbol("EIP is at %s\n", regs->ip);
26727
26728 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26729@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
26730 int copy_thread(unsigned long clone_flags, unsigned long sp,
26731 unsigned long arg, struct task_struct *p)
26732 {
26733- struct pt_regs *childregs = task_pt_regs(p);
26734+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26735 struct task_struct *tsk;
26736 int err;
26737
26738 p->thread.sp = (unsigned long) childregs;
26739 p->thread.sp0 = (unsigned long) (childregs+1);
26740+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26741
26742 if (unlikely(p->flags & PF_KTHREAD)) {
26743 /* kernel thread */
26744 memset(childregs, 0, sizeof(struct pt_regs));
26745 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26746- task_user_gs(p) = __KERNEL_STACK_CANARY;
26747- childregs->ds = __USER_DS;
26748- childregs->es = __USER_DS;
26749+ savesegment(gs, childregs->gs);
26750+ childregs->ds = __KERNEL_DS;
26751+ childregs->es = __KERNEL_DS;
26752 childregs->fs = __KERNEL_PERCPU;
26753 childregs->bx = sp; /* function */
26754 childregs->bp = arg;
26755@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26756 struct thread_struct *prev = &prev_p->thread,
26757 *next = &next_p->thread;
26758 int cpu = smp_processor_id();
26759- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26760+ struct tss_struct *tss = init_tss + cpu;
26761 fpu_switch_t fpu;
26762
26763 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26764@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26765 */
26766 lazy_save_gs(prev->gs);
26767
26768+#ifdef CONFIG_PAX_MEMORY_UDEREF
26769+ __set_fs(task_thread_info(next_p)->addr_limit);
26770+#endif
26771+
26772 /*
26773 * Load the per-thread Thread-Local Storage descriptor.
26774 */
26775@@ -314,9 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26776 */
26777 arch_end_context_switch(next_p);
26778
26779- this_cpu_write(kernel_stack,
26780- (unsigned long)task_stack_page(next_p) +
26781- THREAD_SIZE - KERNEL_STACK_OFFSET);
26782+ this_cpu_write(current_task, next_p);
26783+ this_cpu_write(current_tinfo, &next_p->tinfo);
26784+ this_cpu_write(kernel_stack, next->sp0);
26785
26786 /*
26787 * Restore %gs if needed (which is common)
26788@@ -326,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26789
26790 switch_fpu_finish(next_p, fpu);
26791
26792- this_cpu_write(current_task, next_p);
26793-
26794 return prev_p;
26795 }
26796
26797@@ -357,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
26798 } while (count++ < 16);
26799 return 0;
26800 }
26801-
26802diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26803index ca5b02d..c0b2f6a 100644
26804--- a/arch/x86/kernel/process_64.c
26805+++ b/arch/x86/kernel/process_64.c
26806@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26807 struct pt_regs *childregs;
26808 struct task_struct *me = current;
26809
26810- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26811+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26812 childregs = task_pt_regs(p);
26813 p->thread.sp = (unsigned long) childregs;
26814 p->thread.usersp = me->thread.usersp;
26815+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26816 set_tsk_thread_flag(p, TIF_FORK);
26817 p->thread.fpu_counter = 0;
26818 p->thread.io_bitmap_ptr = NULL;
26819@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26820 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26821 savesegment(es, p->thread.es);
26822 savesegment(ds, p->thread.ds);
26823+ savesegment(ss, p->thread.ss);
26824+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26825 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26826
26827 if (unlikely(p->flags & PF_KTHREAD)) {
26828@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26829 struct thread_struct *prev = &prev_p->thread;
26830 struct thread_struct *next = &next_p->thread;
26831 int cpu = smp_processor_id();
26832- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26833+ struct tss_struct *tss = init_tss + cpu;
26834 unsigned fsindex, gsindex;
26835 fpu_switch_t fpu;
26836
26837@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26838 if (unlikely(next->ds | prev->ds))
26839 loadsegment(ds, next->ds);
26840
26841+ savesegment(ss, prev->ss);
26842+ if (unlikely(next->ss != prev->ss))
26843+ loadsegment(ss, next->ss);
26844
26845 /* We must save %fs and %gs before load_TLS() because
26846 * %fs and %gs may be cleared by load_TLS().
26847@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26848 prev->usersp = this_cpu_read(old_rsp);
26849 this_cpu_write(old_rsp, next->usersp);
26850 this_cpu_write(current_task, next_p);
26851+ this_cpu_write(current_tinfo, &next_p->tinfo);
26852
26853 /*
26854 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26855@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26856 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26857 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26858
26859- this_cpu_write(kernel_stack,
26860- (unsigned long)task_stack_page(next_p) +
26861- THREAD_SIZE - KERNEL_STACK_OFFSET);
26862+ this_cpu_write(kernel_stack, next->sp0);
26863
26864 /*
26865 * Now maybe reload the debug registers and handle I/O bitmaps
26866@@ -443,12 +448,11 @@ unsigned long get_wchan(struct task_struct *p)
26867 if (!p || p == current || p->state == TASK_RUNNING)
26868 return 0;
26869 stack = (unsigned long)task_stack_page(p);
26870- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26871+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26872 return 0;
26873 fp = *(u64 *)(p->thread.sp);
26874 do {
26875- if (fp < (unsigned long)stack ||
26876- fp >= (unsigned long)stack+THREAD_SIZE)
26877+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26878 return 0;
26879 ip = *(u64 *)(fp+8);
26880 if (!in_sched_functions(ip))
26881diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26882index 678c0ad..2fc2a7b 100644
26883--- a/arch/x86/kernel/ptrace.c
26884+++ b/arch/x86/kernel/ptrace.c
26885@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26886 unsigned long sp = (unsigned long)&regs->sp;
26887 u32 *prev_esp;
26888
26889- if (context == (sp & ~(THREAD_SIZE - 1)))
26890+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26891 return sp;
26892
26893- prev_esp = (u32 *)(context);
26894+ prev_esp = *(u32 **)(context);
26895 if (prev_esp)
26896 return (unsigned long)prev_esp;
26897
26898@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26899 if (child->thread.gs != value)
26900 return do_arch_prctl(child, ARCH_SET_GS, value);
26901 return 0;
26902+
26903+ case offsetof(struct user_regs_struct,ip):
26904+ /*
26905+ * Protect against any attempt to set ip to an
26906+ * impossible address. There are dragons lurking if the
26907+ * address is noncanonical. (This explicitly allows
26908+ * setting ip to TASK_SIZE_MAX, because user code can do
26909+ * that all by itself by running off the end of its
26910+ * address space.)
26911+ */
26912+ if (value > TASK_SIZE_MAX)
26913+ return -EIO;
26914+ break;
26915+
26916 #endif
26917 }
26918
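
The new putreg() case bounds any ptrace write to the instruction pointer at TASK_SIZE_MAX: planting a noncanonical value in RIP would otherwise fault unpredictably once the tracee resumes. A hypothetical tracer that trips the check might look like the sketch below (register name and offset come from <sys/user.h>; with the hunk above, the poke fails with -EIO):

/*
 * sketch: poke a noncanonical value into a stopped child's rip;
 * with the putreg() bound above, PTRACE_POKEUSER fails with EIO
 * instead of planting an impossible return address
 */
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* stop so the parent can poke us */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	/* bit 47 set without sign extension: noncanonical on x86-64 */
	if (ptrace(PTRACE_POKEUSER, pid,
		   offsetof(struct user_regs_struct, rip),
		   (void *)0x0000800000000000UL) == -1)
		perror("POKEUSER rip");	/* expected: Input/output error */
	kill(pid, SIGKILL);
	return 0;
}
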
26919@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26920 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26921 {
26922 int i;
26923- int dr7 = 0;
26924+ unsigned long dr7 = 0;
26925 struct arch_hw_breakpoint *info;
26926
26927 for (i = 0; i < HBP_NUM; i++) {
26928@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26929 unsigned long addr, unsigned long data)
26930 {
26931 int ret;
26932- unsigned long __user *datap = (unsigned long __user *)data;
26933+ unsigned long __user *datap = (__force unsigned long __user *)data;
26934
26935 switch (request) {
26936 /* read the word at location addr in the USER area. */
26937@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26938 if ((int) addr < 0)
26939 return -EIO;
26940 ret = do_get_thread_area(child, addr,
26941- (struct user_desc __user *)data);
26942+ (__force struct user_desc __user *) data);
26943 break;
26944
26945 case PTRACE_SET_THREAD_AREA:
26946 if ((int) addr < 0)
26947 return -EIO;
26948 ret = do_set_thread_area(child, addr,
26949- (struct user_desc __user *)data, 0);
26950+ (__force struct user_desc __user *) data, 0);
26951 break;
26952 #endif
26953
26954@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26955
26956 #ifdef CONFIG_X86_64
26957
26958-static struct user_regset x86_64_regsets[] __read_mostly = {
26959+static user_regset_no_const x86_64_regsets[] __read_only = {
26960 [REGSET_GENERAL] = {
26961 .core_note_type = NT_PRSTATUS,
26962 .n = sizeof(struct user_regs_struct) / sizeof(long),
26963@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26964 #endif /* CONFIG_X86_64 */
26965
26966 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26967-static struct user_regset x86_32_regsets[] __read_mostly = {
26968+static user_regset_no_const x86_32_regsets[] __read_only = {
26969 [REGSET_GENERAL] = {
26970 .core_note_type = NT_PRSTATUS,
26971 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26972@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26973 */
26974 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26975
26976-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26977+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26978 {
26979 #ifdef CONFIG_X86_64
26980 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26981@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26982 memset(info, 0, sizeof(*info));
26983 info->si_signo = SIGTRAP;
26984 info->si_code = si_code;
26985- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26986+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26987 }
26988
26989 void user_single_step_siginfo(struct task_struct *tsk,
26990@@ -1450,6 +1464,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
26991 # define IS_IA32 0
26992 #endif
26993
26994+#ifdef CONFIG_GRKERNSEC_SETXID
26995+extern void gr_delayed_cred_worker(void);
26996+#endif
26997+
26998 /*
26999 * We must return the syscall number to actually look up in the table.
27000 * This can be -1L to skip running any syscall at all.
27001@@ -1460,6 +1478,11 @@ long syscall_trace_enter(struct pt_regs *regs)
27002
27003 user_exit();
27004
27005+#ifdef CONFIG_GRKERNSEC_SETXID
27006+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27007+ gr_delayed_cred_worker();
27008+#endif
27009+
27010 /*
27011 * If we stepped into a sysenter/syscall insn, it trapped in
27012 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27013@@ -1515,6 +1538,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27014 */
27015 user_exit();
27016
27017+#ifdef CONFIG_GRKERNSEC_SETXID
27018+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27019+ gr_delayed_cred_worker();
27020+#endif
27021+
27022 audit_syscall_exit(regs);
27023
27024 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
27025diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27026index 2f355d2..e75ed0a 100644
27027--- a/arch/x86/kernel/pvclock.c
27028+++ b/arch/x86/kernel/pvclock.c
27029@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27030 reset_hung_task_detector();
27031 }
27032
27033-static atomic64_t last_value = ATOMIC64_INIT(0);
27034+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27035
27036 void pvclock_resume(void)
27037 {
27038- atomic64_set(&last_value, 0);
27039+ atomic64_set_unchecked(&last_value, 0);
27040 }
27041
27042 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27043@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27044 * updating at the same time, and one of them could be slightly behind,
27045 * making the assumption that last_value always goes forward fail to hold.
27046 */
27047- last = atomic64_read(&last_value);
27048+ last = atomic64_read_unchecked(&last_value);
27049 do {
27050 if (ret < last)
27051 return last;
27052- last = atomic64_cmpxchg(&last_value, last, ret);
27053+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27054 } while (unlikely(last != ret));
27055
27056 return ret;
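
last_value here is a high-water mark, not a reference count, which is why the patch switches it to the _unchecked atomic64 flavor: PaX REFCOUNT instruments ordinary atomic types against overflow, and a free-running timestamp must be allowed to grow without tripping that. The surrounding loop enforces cross-CPU monotonicity; the same pattern in portable C11 atomics (an illustrative sketch, not the kernel API):

/*
 * sketch: keep a shared "last seen" timestamp monotonic across
 * threads, mirroring the cmpxchg loop in pvclock_clocksource_read()
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t last_value;

uint64_t monotonic_read(uint64_t raw)
{
	uint64_t last = atomic_load(&last_value);

	for (;;) {
		if (raw < last)
			return last;	/* someone saw a later time; use it */
		/* try to publish our reading; on failure, last is reloaded */
		if (atomic_compare_exchange_weak(&last_value, &last, raw))
			return raw;
	}
}

Each caller either publishes its own reading or adopts a later one already published, so returned values never go backwards.
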
27057diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27058index 52b1157..c6e67c4 100644
27059--- a/arch/x86/kernel/reboot.c
27060+++ b/arch/x86/kernel/reboot.c
27061@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27062
27063 void __noreturn machine_real_restart(unsigned int type)
27064 {
27065+
27066+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27067+ struct desc_struct *gdt;
27068+#endif
27069+
27070 local_irq_disable();
27071
27072 /*
27073@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
27074
27075 /* Jump to the identity-mapped low memory code */
27076 #ifdef CONFIG_X86_32
27077- asm volatile("jmpl *%0" : :
27078+
27079+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27080+ gdt = get_cpu_gdt_table(smp_processor_id());
27081+ pax_open_kernel();
27082+#ifdef CONFIG_PAX_MEMORY_UDEREF
27083+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27084+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27085+ loadsegment(ds, __KERNEL_DS);
27086+ loadsegment(es, __KERNEL_DS);
27087+ loadsegment(ss, __KERNEL_DS);
27088+#endif
27089+#ifdef CONFIG_PAX_KERNEXEC
27090+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27091+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27092+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27093+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27094+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27095+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27096+#endif
27097+ pax_close_kernel();
27098+#endif
27099+
27100+ asm volatile("ljmpl *%0" : :
27101 "rm" (real_mode_header->machine_real_restart_asm),
27102 "a" (type));
27103 #else
27104@@ -486,7 +513,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27105 * This means that this function can never return, it can misbehave
27106 * by not rebooting properly and hanging.
27107 */
27108-static void native_machine_emergency_restart(void)
27109+static void __noreturn native_machine_emergency_restart(void)
27110 {
27111 int i;
27112 int attempt = 0;
27113@@ -610,13 +637,13 @@ void native_machine_shutdown(void)
27114 #endif
27115 }
27116
27117-static void __machine_emergency_restart(int emergency)
27118+static void __noreturn __machine_emergency_restart(int emergency)
27119 {
27120 reboot_emergency = emergency;
27121 machine_ops.emergency_restart();
27122 }
27123
27124-static void native_machine_restart(char *__unused)
27125+static void __noreturn native_machine_restart(char *__unused)
27126 {
27127 pr_notice("machine restart\n");
27128
27129@@ -625,7 +652,7 @@ static void native_machine_restart(char *__unused)
27130 __machine_emergency_restart(0);
27131 }
27132
27133-static void native_machine_halt(void)
27134+static void __noreturn native_machine_halt(void)
27135 {
27136 /* Stop other cpus and apics */
27137 machine_shutdown();
27138@@ -635,7 +662,7 @@ static void native_machine_halt(void)
27139 stop_this_cpu(NULL);
27140 }
27141
27142-static void native_machine_power_off(void)
27143+static void __noreturn native_machine_power_off(void)
27144 {
27145 if (pm_power_off) {
27146 if (!reboot_force)
27147@@ -644,9 +671,10 @@ static void native_machine_power_off(void)
27148 }
27149 /* A fallback in case there is no PM info available */
27150 tboot_shutdown(TB_SHUTDOWN_HALT);
27151+ unreachable();
27152 }
27153
27154-struct machine_ops machine_ops = {
27155+struct machine_ops machine_ops __read_only = {
27156 .power_off = native_machine_power_off,
27157 .shutdown = native_machine_shutdown,
27158 .emergency_restart = native_machine_emergency_restart,
27159diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27160index c8e41e9..64049ef 100644
27161--- a/arch/x86/kernel/reboot_fixups_32.c
27162+++ b/arch/x86/kernel/reboot_fixups_32.c
27163@@ -57,7 +57,7 @@ struct device_fixup {
27164 unsigned int vendor;
27165 unsigned int device;
27166 void (*reboot_fixup)(struct pci_dev *);
27167-};
27168+} __do_const;
27169
27170 /*
27171 * PCI ids solely used for fixups_table go here
27172diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27173index 3fd2c69..a444264 100644
27174--- a/arch/x86/kernel/relocate_kernel_64.S
27175+++ b/arch/x86/kernel/relocate_kernel_64.S
27176@@ -96,8 +96,7 @@ relocate_kernel:
27177
27178 /* jump to identity mapped page */
27179 addq $(identity_mapped - relocate_kernel), %r8
27180- pushq %r8
27181- ret
27182+ jmp *%r8
27183
27184 identity_mapped:
27185 /* set return address to 0 if not preserving context */
27186diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27187index 78a0e62..5c2e510 100644
27188--- a/arch/x86/kernel/setup.c
27189+++ b/arch/x86/kernel/setup.c
27190@@ -110,6 +110,7 @@
27191 #include <asm/mce.h>
27192 #include <asm/alternative.h>
27193 #include <asm/prom.h>
27194+#include <asm/boot.h>
27195
27196 /*
27197 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27198@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27199 #endif
27200
27201
27202-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27203-__visible unsigned long mmu_cr4_features;
27204+#ifdef CONFIG_X86_64
27205+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27206+#elif defined(CONFIG_X86_PAE)
27207+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27208 #else
27209-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27210+__visible unsigned long mmu_cr4_features __read_only;
27211 #endif
27212
27213+void set_in_cr4(unsigned long mask)
27214+{
27215+ unsigned long cr4 = read_cr4();
27216+
27217+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27218+ return;
27219+
27220+ pax_open_kernel();
27221+ mmu_cr4_features |= mask;
27222+ pax_close_kernel();
27223+
27224+ if (trampoline_cr4_features)
27225+ *trampoline_cr4_features = mmu_cr4_features;
27226+ cr4 |= mask;
27227+ write_cr4(cr4);
27228+}
27229+EXPORT_SYMBOL(set_in_cr4);
27230+
27231+void clear_in_cr4(unsigned long mask)
27232+{
27233+ unsigned long cr4 = read_cr4();
27234+
27235+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27236+ return;
27237+
27238+ pax_open_kernel();
27239+ mmu_cr4_features &= ~mask;
27240+ pax_close_kernel();
27241+
27242+ if (trampoline_cr4_features)
27243+ *trampoline_cr4_features = mmu_cr4_features;
27244+ cr4 &= ~mask;
27245+ write_cr4(cr4);
27246+}
27247+EXPORT_SYMBOL(clear_in_cr4);
27248+
27249 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27250 int bootloader_type, bootloader_version;
27251
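
With mmu_cr4_features moved to __read_only memory, these helpers become the single choke point for CR4 changes: the cached mask is updated inside a pax_open_kernel()/pax_close_kernel() window, propagated to the trampoline copy used when CPUs boot or resume, and only then written to the register. A usage sketch (X86_CR4_TSD is just an example bit):

set_in_cr4(X86_CR4_TSD);	/* restrict rdtsc to ring 0 */
clear_in_cr4(X86_CR4_TSD);	/* permit rdtsc from user mode again */
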
27252@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27253 * area (640->1Mb) as ram even though it is not.
27254 * take them out.
27255 */
27256- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27257+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27258
27259 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27260 }
27261@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27262 /* called before trim_bios_range() to spare extra sanitize */
27263 static void __init e820_add_kernel_range(void)
27264 {
27265- u64 start = __pa_symbol(_text);
27266+ u64 start = __pa_symbol(ktla_ktva(_text));
27267 u64 size = __pa_symbol(_end) - start;
27268
27269 /*
27270@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27271
27272 void __init setup_arch(char **cmdline_p)
27273 {
27274+#ifdef CONFIG_X86_32
27275+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27276+#else
27277 memblock_reserve(__pa_symbol(_text),
27278 (unsigned long)__bss_stop - (unsigned long)_text);
27279+#endif
27280
27281 early_reserve_initrd();
27282
27283@@ -946,14 +989,14 @@ void __init setup_arch(char **cmdline_p)
27284
27285 if (!boot_params.hdr.root_flags)
27286 root_mountflags &= ~MS_RDONLY;
27287- init_mm.start_code = (unsigned long) _text;
27288- init_mm.end_code = (unsigned long) _etext;
27289+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27290+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27291 init_mm.end_data = (unsigned long) _edata;
27292 init_mm.brk = _brk_end;
27293
27294- code_resource.start = __pa_symbol(_text);
27295- code_resource.end = __pa_symbol(_etext)-1;
27296- data_resource.start = __pa_symbol(_etext);
27297+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27298+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27299+ data_resource.start = __pa_symbol(_sdata);
27300 data_resource.end = __pa_symbol(_edata)-1;
27301 bss_resource.start = __pa_symbol(__bss_start);
27302 bss_resource.end = __pa_symbol(__bss_stop)-1;
27303diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27304index 5cdff03..80fa283 100644
27305--- a/arch/x86/kernel/setup_percpu.c
27306+++ b/arch/x86/kernel/setup_percpu.c
27307@@ -21,19 +21,17 @@
27308 #include <asm/cpu.h>
27309 #include <asm/stackprotector.h>
27310
27311-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27312+#ifdef CONFIG_SMP
27313+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27314 EXPORT_PER_CPU_SYMBOL(cpu_number);
27315+#endif
27316
27317-#ifdef CONFIG_X86_64
27318 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27319-#else
27320-#define BOOT_PERCPU_OFFSET 0
27321-#endif
27322
27323 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27324 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27325
27326-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27327+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27328 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27329 };
27330 EXPORT_SYMBOL(__per_cpu_offset);
27331@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27332 {
27333 #ifdef CONFIG_NEED_MULTIPLE_NODES
27334 pg_data_t *last = NULL;
27335- unsigned int cpu;
27336+ int cpu;
27337
27338 for_each_possible_cpu(cpu) {
27339 int node = early_cpu_to_node(cpu);
27340@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27341 {
27342 #ifdef CONFIG_X86_32
27343 struct desc_struct gdt;
27344+ unsigned long base = per_cpu_offset(cpu);
27345
27346- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27347- 0x2 | DESCTYPE_S, 0x8);
27348- gdt.s = 1;
27349+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27350+ 0x83 | DESCTYPE_S, 0xC);
27351 write_gdt_entry(get_cpu_gdt_table(cpu),
27352 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27353 #endif
27354@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27355 /* alrighty, percpu areas up and running */
27356 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27357 for_each_possible_cpu(cpu) {
27358+#ifdef CONFIG_CC_STACKPROTECTOR
27359+#ifdef CONFIG_X86_32
27360+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27361+#endif
27362+#endif
27363 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27364 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27365 per_cpu(cpu_number, cpu) = cpu;
27366@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27367 */
27368 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27369 #endif
27370+#ifdef CONFIG_CC_STACKPROTECTOR
27371+#ifdef CONFIG_X86_32
27372+ if (!cpu)
27373+ per_cpu(stack_canary.canary, cpu) = canary;
27374+#endif
27375+#endif
27376 /*
27377 * Up to this point, the boot CPU has been using .init.data
27378 * area. Reload any changed state for the boot CPU.
27379diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27380index 2851d63..83bf567 100644
27381--- a/arch/x86/kernel/signal.c
27382+++ b/arch/x86/kernel/signal.c
27383@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27384 * Align the stack pointer according to the i386 ABI,
27385 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27386 */
27387- sp = ((sp + 4) & -16ul) - 4;
27388+ sp = ((sp - 12) & -16ul) - 4;
27389 #else /* !CONFIG_X86_32 */
27390 sp = round_down(sp, 16) - 8;
27391 #endif
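
Both the old and the new expression land the frame on the i386 ABI alignment the comment describes, i.e. sp % 16 == 12 so that (sp + 4) % 16 == 0 once the handler is entered; the patched form additionally guarantees the frame sits strictly below the incoming sp, whereas the old one could leave sp unchanged when it was already aligned. A quick check of that arithmetic:

/*
 * sketch: both forms satisfy the i386 ABI alignment, but only the
 * patched one always moves below the incoming stack pointer
 */
#include <assert.h>

int main(void)
{
	for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
		unsigned long sp_old = ((sp + 4) & -16ul) - 4;
		unsigned long sp_new = ((sp - 12) & -16ul) - 4;

		assert((sp_old + 4) % 16 == 0);
		assert((sp_new + 4) % 16 == 0);
		assert(sp_new < sp);	/* sp_old == sp when sp % 16 == 12 */
	}
	return 0;
}
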
27392@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27393 }
27394
27395 if (current->mm->context.vdso)
27396- restorer = current->mm->context.vdso +
27397- selected_vdso32->sym___kernel_sigreturn;
27398+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27399 else
27400- restorer = &frame->retcode;
27401+ restorer = (void __user *)&frame->retcode;
27402 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27403 restorer = ksig->ka.sa.sa_restorer;
27404
27405@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27406 * reasons and because gdb uses it as a signature to notice
27407 * signal handler stack frames.
27408 */
27409- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27410+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27411
27412 if (err)
27413 return -EFAULT;
27414@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27415 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27416
27417 /* Set up to return from userspace. */
27418- restorer = current->mm->context.vdso +
27419- selected_vdso32->sym___kernel_rt_sigreturn;
27420+ if (current->mm->context.vdso)
27421+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27422+ else
27423+ restorer = (void __user *)&frame->retcode;
27424 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27425 restorer = ksig->ka.sa.sa_restorer;
27426 put_user_ex(restorer, &frame->pretcode);
27427@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27428 * reasons and because gdb uses it as a signature to notice
27429 * signal handler stack frames.
27430 */
27431- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27432+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27433 } put_user_catch(err);
27434
27435 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27436@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27437 {
27438 int usig = signr_convert(ksig->sig);
27439 sigset_t *set = sigmask_to_save();
27440- compat_sigset_t *cset = (compat_sigset_t *) set;
27441+ sigset_t sigcopy;
27442+ compat_sigset_t *cset;
27443+
27444+ sigcopy = *set;
27445+
27446+ cset = (compat_sigset_t *) &sigcopy;
27447
27448 /* Set up the stack frame */
27449 if (is_ia32_frame()) {
27450@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27451 } else if (is_x32_frame()) {
27452 return x32_setup_rt_frame(ksig, cset, regs);
27453 } else {
27454- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27455+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27456 }
27457 }
27458
27459diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27460index be8e1bd..a3d93fa 100644
27461--- a/arch/x86/kernel/smp.c
27462+++ b/arch/x86/kernel/smp.c
27463@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27464
27465 __setup("nonmi_ipi", nonmi_ipi_setup);
27466
27467-struct smp_ops smp_ops = {
27468+struct smp_ops smp_ops __read_only = {
27469 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27470 .smp_prepare_cpus = native_smp_prepare_cpus,
27471 .smp_cpus_done = native_smp_cpus_done,
27472diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27473index 5492798..a3bd4f2 100644
27474--- a/arch/x86/kernel/smpboot.c
27475+++ b/arch/x86/kernel/smpboot.c
27476@@ -230,14 +230,17 @@ static void notrace start_secondary(void *unused)
27477
27478 enable_start_cpu0 = 0;
27479
27480-#ifdef CONFIG_X86_32
27481+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27482+ barrier();
27483+
27484 /* switch away from the initial page table */
27485+#ifdef CONFIG_PAX_PER_CPU_PGD
27486+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27487+#else
27488 load_cr3(swapper_pg_dir);
27489+#endif
27490 __flush_tlb_all();
27491-#endif
27492
27493- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27494- barrier();
27495 /*
27496 * Check TSC synchronization with the BP:
27497 */
27498@@ -764,8 +767,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27499 alternatives_enable_smp();
27500
27501 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27502- (THREAD_SIZE + task_stack_page(idle))) - 1);
27503+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27504 per_cpu(current_task, cpu) = idle;
27505+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27506
27507 #ifdef CONFIG_X86_32
27508 /* Stack for startup_32 can be just as for start_secondary onwards */
27509@@ -774,10 +778,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27510 clear_tsk_thread_flag(idle, TIF_FORK);
27511 initial_gs = per_cpu_offset(cpu);
27512 #endif
27513- per_cpu(kernel_stack, cpu) =
27514- (unsigned long)task_stack_page(idle) -
27515- KERNEL_STACK_OFFSET + THREAD_SIZE;
27516+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27517+ pax_open_kernel();
27518 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27519+ pax_close_kernel();
27520 initial_code = (unsigned long)start_secondary;
27521 stack_start = idle->thread.sp;
27522
27523@@ -923,6 +927,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27524 /* the FPU context is blank, nobody can own it */
27525 __cpu_disable_lazy_restore(cpu);
27526
27527+#ifdef CONFIG_PAX_PER_CPU_PGD
27528+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27529+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27530+ KERNEL_PGD_PTRS);
27531+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27532+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27533+ KERNEL_PGD_PTRS);
27534+#endif
27535+
27536 err = do_boot_cpu(apicid, cpu, tidle);
27537 if (err) {
27538 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27539diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27540index 9b4d51d..5d28b58 100644
27541--- a/arch/x86/kernel/step.c
27542+++ b/arch/x86/kernel/step.c
27543@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27544 struct desc_struct *desc;
27545 unsigned long base;
27546
27547- seg &= ~7UL;
27548+ seg >>= 3;
27549
27550 mutex_lock(&child->mm->context.lock);
27551- if (unlikely((seg >> 3) >= child->mm->context.size))
27552+ if (unlikely(seg >= child->mm->context.size))
27553 addr = -1L; /* bogus selector, access would fault */
27554 else {
27555 desc = child->mm->context.ldt + seg;
27556@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27557 addr += base;
27558 }
27559 mutex_unlock(&child->mm->context.lock);
27560- }
27561+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27562+ addr = ktla_ktva(addr);
27563
27564 return addr;
27565 }
27566@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27567 unsigned char opcode[15];
27568 unsigned long addr = convert_ip_to_linear(child, regs);
27569
27570+ if (addr == -EINVAL)
27571+ return 0;
27572+
27573 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27574 for (i = 0; i < copied; i++) {
27575 switch (opcode[i]) {
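
The rewrite turns the selector into an LDT index directly: an x86 segment selector packs the table index in bits 15:3, the table indicator (GDT/LDT) in bit 2, and the requested privilege level in bits 1:0, so seg >>= 3 yields the index the old code obtained with a mask followed by a shift at the comparison site. A small decoder, for illustration:

/*
 * sketch: decompose an x86 segment selector, e.g. 0x000f ->
 * index 1, TI = 1 (LDT), RPL = 3
 */
#include <stdio.h>

int main(void)
{
	unsigned short sel = 0x000f;

	printf("index %u, %s, rpl %u\n",
	       sel >> 3,			/* descriptor table index */
	       (sel & 4) ? "LDT" : "GDT",	/* table indicator bit */
	       sel & 3);			/* requested privilege level */
	return 0;
}
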
27576diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27577new file mode 100644
27578index 0000000..5877189
27579--- /dev/null
27580+++ b/arch/x86/kernel/sys_i386_32.c
27581@@ -0,0 +1,189 @@
27582+/*
27583+ * This file contains various random system calls that
27584+ * have a non-standard calling sequence on the Linux/i386
27585+ * platform.
27586+ */
27587+
27588+#include <linux/errno.h>
27589+#include <linux/sched.h>
27590+#include <linux/mm.h>
27591+#include <linux/fs.h>
27592+#include <linux/smp.h>
27593+#include <linux/sem.h>
27594+#include <linux/msg.h>
27595+#include <linux/shm.h>
27596+#include <linux/stat.h>
27597+#include <linux/syscalls.h>
27598+#include <linux/mman.h>
27599+#include <linux/file.h>
27600+#include <linux/utsname.h>
27601+#include <linux/ipc.h>
27602+#include <linux/elf.h>
27603+
27604+#include <linux/uaccess.h>
27605+#include <linux/unistd.h>
27606+
27607+#include <asm/syscalls.h>
27608+
27609+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27610+{
27611+ unsigned long pax_task_size = TASK_SIZE;
27612+
27613+#ifdef CONFIG_PAX_SEGMEXEC
27614+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27615+ pax_task_size = SEGMEXEC_TASK_SIZE;
27616+#endif
27617+
27618+ if (flags & MAP_FIXED)
27619+ if (len > pax_task_size || addr > pax_task_size - len)
27620+ return -EINVAL;
27621+
27622+ return 0;
27623+}
27624+
27625+/*
27626+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27627+ */
27628+static unsigned long get_align_mask(void)
27629+{
27630+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27631+ return 0;
27632+
27633+ if (!(current->flags & PF_RANDOMIZE))
27634+ return 0;
27635+
27636+ return va_align.mask;
27637+}
27638+
27639+unsigned long
27640+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27641+ unsigned long len, unsigned long pgoff, unsigned long flags)
27642+{
27643+ struct mm_struct *mm = current->mm;
27644+ struct vm_area_struct *vma;
27645+ unsigned long pax_task_size = TASK_SIZE;
27646+ struct vm_unmapped_area_info info;
27647+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27648+
27649+#ifdef CONFIG_PAX_SEGMEXEC
27650+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27651+ pax_task_size = SEGMEXEC_TASK_SIZE;
27652+#endif
27653+
27654+ pax_task_size -= PAGE_SIZE;
27655+
27656+ if (len > pax_task_size)
27657+ return -ENOMEM;
27658+
27659+ if (flags & MAP_FIXED)
27660+ return addr;
27661+
27662+#ifdef CONFIG_PAX_RANDMMAP
27663+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27664+#endif
27665+
27666+ if (addr) {
27667+ addr = PAGE_ALIGN(addr);
27668+ if (pax_task_size - len >= addr) {
27669+ vma = find_vma(mm, addr);
27670+ if (check_heap_stack_gap(vma, addr, len, offset))
27671+ return addr;
27672+ }
27673+ }
27674+
27675+ info.flags = 0;
27676+ info.length = len;
27677+ info.align_mask = filp ? get_align_mask() : 0;
27678+ info.align_offset = pgoff << PAGE_SHIFT;
27679+ info.threadstack_offset = offset;
27680+
27681+#ifdef CONFIG_PAX_PAGEEXEC
27682+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27683+ info.low_limit = 0x00110000UL;
27684+ info.high_limit = mm->start_code;
27685+
27686+#ifdef CONFIG_PAX_RANDMMAP
27687+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27688+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27689+#endif
27690+
27691+ if (info.low_limit < info.high_limit) {
27692+ addr = vm_unmapped_area(&info);
27693+ if (!IS_ERR_VALUE(addr))
27694+ return addr;
27695+ }
27696+ } else
27697+#endif
27698+
27699+ info.low_limit = mm->mmap_base;
27700+ info.high_limit = pax_task_size;
27701+
27702+ return vm_unmapped_area(&info);
27703+}
27704+
27705+unsigned long
27706+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27707+ const unsigned long len, const unsigned long pgoff,
27708+ const unsigned long flags)
27709+{
27710+ struct vm_area_struct *vma;
27711+ struct mm_struct *mm = current->mm;
27712+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27713+ struct vm_unmapped_area_info info;
27714+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27715+
27716+#ifdef CONFIG_PAX_SEGMEXEC
27717+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27718+ pax_task_size = SEGMEXEC_TASK_SIZE;
27719+#endif
27720+
27721+ pax_task_size -= PAGE_SIZE;
27722+
27723+ /* requested length too big for entire address space */
27724+ if (len > pax_task_size)
27725+ return -ENOMEM;
27726+
27727+ if (flags & MAP_FIXED)
27728+ return addr;
27729+
27730+#ifdef CONFIG_PAX_PAGEEXEC
27731+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27732+ goto bottomup;
27733+#endif
27734+
27735+#ifdef CONFIG_PAX_RANDMMAP
27736+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27737+#endif
27738+
27739+ /* requesting a specific address */
27740+ if (addr) {
27741+ addr = PAGE_ALIGN(addr);
27742+ if (pax_task_size - len >= addr) {
27743+ vma = find_vma(mm, addr);
27744+ if (check_heap_stack_gap(vma, addr, len, offset))
27745+ return addr;
27746+ }
27747+ }
27748+
27749+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27750+ info.length = len;
27751+ info.low_limit = PAGE_SIZE;
27752+ info.high_limit = mm->mmap_base;
27753+ info.align_mask = filp ? get_align_mask() : 0;
27754+ info.align_offset = pgoff << PAGE_SHIFT;
27755+ info.threadstack_offset = offset;
27756+
27757+ addr = vm_unmapped_area(&info);
27758+ if (!(addr & ~PAGE_MASK))
27759+ return addr;
27760+ VM_BUG_ON(addr != -ENOMEM);
27761+
27762+bottomup:
27763+ /*
27764+ * A failed mmap() very likely causes application failure,
27765+ * so fall back to the bottom-up function here. This scenario
27766+ * can happen with large stack limits and large mmap()
27767+ * allocations.
27768+ */
27769+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27770+}
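
The topdown routine above reports success by returning a page-aligned address, so !(addr & ~PAGE_MASK) doubles as the error test before falling back to the bottom-up search: every error value, being a small negative number cast to unsigned long, has bits set inside the page offset. A standalone sketch of that trick (PAGE_MASK redefined locally for the demo):

/*
 * sketch: why "!(addr & ~PAGE_MASK)" separates success from error --
 * valid placements are page aligned, while error returns such as
 * -ENOMEM have nonzero bits in the page-offset part
 */
#include <stdio.h>

#define PAGE_MASK (~4095UL)

int main(void)
{
	unsigned long ok  = 0x40010000UL;	/* page-aligned address */
	unsigned long err = (unsigned long)-12;	/* -ENOMEM */

	printf("ok:  %s\n", (ok  & ~PAGE_MASK) ? "error" : "success");
	printf("err: %s\n", (err & ~PAGE_MASK) ? "error" : "success");
	return 0;
}
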
27771diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27772index 30277e2..5664a29 100644
27773--- a/arch/x86/kernel/sys_x86_64.c
27774+++ b/arch/x86/kernel/sys_x86_64.c
27775@@ -81,8 +81,8 @@ out:
27776 return error;
27777 }
27778
27779-static void find_start_end(unsigned long flags, unsigned long *begin,
27780- unsigned long *end)
27781+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27782+ unsigned long *begin, unsigned long *end)
27783 {
27784 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27785 unsigned long new_begin;
27786@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27787 *begin = new_begin;
27788 }
27789 } else {
27790- *begin = current->mm->mmap_legacy_base;
27791+ *begin = mm->mmap_legacy_base;
27792 *end = TASK_SIZE;
27793 }
27794 }
27795@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27796 struct vm_area_struct *vma;
27797 struct vm_unmapped_area_info info;
27798 unsigned long begin, end;
27799+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27800
27801 if (flags & MAP_FIXED)
27802 return addr;
27803
27804- find_start_end(flags, &begin, &end);
27805+ find_start_end(mm, flags, &begin, &end);
27806
27807 if (len > end)
27808 return -ENOMEM;
27809
27810+#ifdef CONFIG_PAX_RANDMMAP
27811+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27812+#endif
27813+
27814 if (addr) {
27815 addr = PAGE_ALIGN(addr);
27816 vma = find_vma(mm, addr);
27817- if (end - len >= addr &&
27818- (!vma || addr + len <= vma->vm_start))
27819+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27820 return addr;
27821 }
27822
27823@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27824 info.high_limit = end;
27825 info.align_mask = filp ? get_align_mask() : 0;
27826 info.align_offset = pgoff << PAGE_SHIFT;
27827+ info.threadstack_offset = offset;
27828 return vm_unmapped_area(&info);
27829 }
27830
27831@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27832 struct mm_struct *mm = current->mm;
27833 unsigned long addr = addr0;
27834 struct vm_unmapped_area_info info;
27835+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27836
27837 /* requested length too big for entire address space */
27838 if (len > TASK_SIZE)
27839@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27840 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27841 goto bottomup;
27842
27843+#ifdef CONFIG_PAX_RANDMMAP
27844+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27845+#endif
27846+
27847 /* requesting a specific address */
27848 if (addr) {
27849 addr = PAGE_ALIGN(addr);
27850 vma = find_vma(mm, addr);
27851- if (TASK_SIZE - len >= addr &&
27852- (!vma || addr + len <= vma->vm_start))
27853+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27854 return addr;
27855 }
27856
27857@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27858 info.high_limit = mm->mmap_base;
27859 info.align_mask = filp ? get_align_mask() : 0;
27860 info.align_offset = pgoff << PAGE_SHIFT;
27861+ info.threadstack_offset = offset;
27862 addr = vm_unmapped_area(&info);
27863 if (!(addr & ~PAGE_MASK))
27864 return addr;
27865diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27866index 91a4496..bb87552 100644
27867--- a/arch/x86/kernel/tboot.c
27868+++ b/arch/x86/kernel/tboot.c
27869@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27870
27871 void tboot_shutdown(u32 shutdown_type)
27872 {
27873- void (*shutdown)(void);
27874+ void (* __noreturn shutdown)(void);
27875
27876 if (!tboot_enabled())
27877 return;
27878@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27879
27880 switch_to_tboot_pt();
27881
27882- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27883+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27884 shutdown();
27885
27886 /* should not reach here */
27887@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27888 return -ENODEV;
27889 }
27890
27891-static atomic_t ap_wfs_count;
27892+static atomic_unchecked_t ap_wfs_count;
27893
27894 static int tboot_wait_for_aps(int num_aps)
27895 {
27896@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27897 {
27898 switch (action) {
27899 case CPU_DYING:
27900- atomic_inc(&ap_wfs_count);
27901+ atomic_inc_unchecked(&ap_wfs_count);
27902 if (num_online_cpus() == 1)
27903- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27904+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27905 return NOTIFY_BAD;
27906 break;
27907 }
27908@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27909
27910 tboot_create_trampoline();
27911
27912- atomic_set(&ap_wfs_count, 0);
27913+ atomic_set_unchecked(&ap_wfs_count, 0);
27914 register_hotcpu_notifier(&tboot_cpu_notifier);
27915
27916 #ifdef CONFIG_DEBUG_FS
27917diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27918index bf7ef5c..59d0ac9 100644
27919--- a/arch/x86/kernel/time.c
27920+++ b/arch/x86/kernel/time.c
27921@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27922 {
27923 unsigned long pc = instruction_pointer(regs);
27924
27925- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27926+ if (!user_mode(regs) && in_lock_functions(pc)) {
27927 #ifdef CONFIG_FRAME_POINTER
27928- return *(unsigned long *)(regs->bp + sizeof(long));
27929+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27930 #else
27931 unsigned long *sp =
27932 (unsigned long *)kernel_stack_pointer(regs);
27933@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27934 * or above a saved flags. Eflags has bits 22-31 zero,
27935 * kernel addresses don't.
27936 */
27937+
27938+#ifdef CONFIG_PAX_KERNEXEC
27939+ return ktla_ktva(sp[0]);
27940+#else
27941 if (sp[0] >> 22)
27942 return sp[0];
27943 if (sp[1] >> 22)
27944 return sp[1];
27945 #endif
27946+
27947+#endif
27948 }
27949 return pc;
27950 }
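
The heuristic the comment describes works because the architecturally defined EFLAGS bits stop at bit 21 (the ID flag), so a saved flags word shifted right by 22 is zero, while i386 kernel text lives at or above PAGE_OFFSET and always has those high bits set; the KERNEXEC branch skips the guess entirely and translates sp[0] through ktla_ktva() unconditionally. Numerically (a throwaway check):

/* sketch: bits 22-31 separate saved EFLAGS from i386 kernel text addresses */
#include <stdio.h>

int main(void)
{
	unsigned long eflags = 0x00200246UL;	/* typical flags: ID|IF|ZF|PF */
	unsigned long kaddr  = 0xc1052af0UL;	/* address above PAGE_OFFSET */

	printf("eflags >> 22 = %lu\n", eflags >> 22);	/* 0 */
	printf("kaddr  >> 22 = %lu\n", kaddr >> 22);	/* nonzero */
	return 0;
}
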
27951diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27952index f7fec09..9991981 100644
27953--- a/arch/x86/kernel/tls.c
27954+++ b/arch/x86/kernel/tls.c
27955@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27956 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27957 return -EINVAL;
27958
27959+#ifdef CONFIG_PAX_SEGMEXEC
27960+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27961+ return -EINVAL;
27962+#endif
27963+
27964 set_tls_desc(p, idx, &info, 1);
27965
27966 return 0;
27967@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27968
27969 if (kbuf)
27970 info = kbuf;
27971- else if (__copy_from_user(infobuf, ubuf, count))
27972+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27973 return -EFAULT;
27974 else
27975 info = infobuf;
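
The added count > sizeof infobuf test is the canonical guard for a caller-supplied length flowing into a fixed buffer: bound it before the copy runs, never after. The same shape in isolation (a generic sketch, with memcpy standing in for __copy_from_user()):

/* sketch: reject an oversized user-supplied length up front */
#include <errno.h>
#include <string.h>

int copy_bounded(void *dst, size_t dst_size, const void *src, size_t count)
{
	if (count > dst_size)	/* attacker-influenced size: check first */
		return -EFAULT;
	memcpy(dst, src, count);
	return 0;
}
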
27976diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27977index 1c113db..287b42e 100644
27978--- a/arch/x86/kernel/tracepoint.c
27979+++ b/arch/x86/kernel/tracepoint.c
27980@@ -9,11 +9,11 @@
27981 #include <linux/atomic.h>
27982
27983 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27984-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27985+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27986 (unsigned long) trace_idt_table };
27987
27988 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27989-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27990+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27991
27992 static int trace_irq_vector_refcount;
27993 static DEFINE_MUTEX(irq_vector_mutex);
27994diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27995index 0d0e922..0886373 100644
27996--- a/arch/x86/kernel/traps.c
27997+++ b/arch/x86/kernel/traps.c
27998@@ -67,7 +67,7 @@
27999 #include <asm/proto.h>
28000
28001 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28002-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28003+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28004 #else
28005 #include <asm/processor-flags.h>
28006 #include <asm/setup.h>
28007@@ -76,7 +76,7 @@ asmlinkage int system_call(void);
28008 #endif
28009
28010 /* Must be page-aligned because the real IDT is used in a fixmap. */
28011-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28012+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28013
28014 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28015 EXPORT_SYMBOL_GPL(used_vectors);
28016@@ -108,11 +108,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28017 }
28018
28019 static nokprobe_inline int
28020-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28021+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28022 struct pt_regs *regs, long error_code)
28023 {
28024 #ifdef CONFIG_X86_32
28025- if (regs->flags & X86_VM_MASK) {
28026+ if (v8086_mode(regs)) {
28027 /*
28028 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28029 * On nmi (interrupt 2), do_trap should not be called.
28030@@ -125,12 +125,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28031 return -1;
28032 }
28033 #endif
28034- if (!user_mode(regs)) {
28035+ if (!user_mode_novm(regs)) {
28036 if (!fixup_exception(regs)) {
28037 tsk->thread.error_code = error_code;
28038 tsk->thread.trap_nr = trapnr;
28039+
28040+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28041+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28042+ str = "PAX: suspicious stack segment fault";
28043+#endif
28044+
28045 die(str, regs, error_code);
28046 }
28047+
28048+#ifdef CONFIG_PAX_REFCOUNT
28049+ if (trapnr == X86_TRAP_OF)
28050+ pax_report_refcount_overflow(regs);
28051+#endif
28052+
28053 return 0;
28054 }
28055
28056@@ -169,7 +181,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28057 }
28058
28059 static void
28060-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28061+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28062 long error_code, siginfo_t *info)
28063 {
28064 struct task_struct *tsk = current;
28065@@ -193,7 +205,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28066 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28067 printk_ratelimit()) {
28068 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28069- tsk->comm, tsk->pid, str,
28070+ tsk->comm, task_pid_nr(tsk), str,
28071 regs->ip, regs->sp, error_code);
28072 print_vma_addr(" in ", regs->ip);
28073 pr_cont("\n");
28074@@ -266,6 +278,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28075 tsk->thread.error_code = error_code;
28076 tsk->thread.trap_nr = X86_TRAP_DF;
28077
28078+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28079+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28080+ die("grsec: kernel stack overflow detected", regs, error_code);
28081+#endif
28082+
28083 #ifdef CONFIG_DOUBLEFAULT
28084 df_debug(regs, error_code);
28085 #endif
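
The KSTACKOVERFLOW test treats a double fault whose saved stack pointer has descended to within a page at or below the base of the task's stack allocation as an overrun, and dies immediately rather than attempting recovery; unsigned wraparound makes the single subtraction reject any in-range pointer. The comparison, pulled out for inspection (base address hypothetical):

/*
 * sketch: the overflow test fires once the fault-time stack pointer
 * has dropped to within one page at or below the task stack base;
 * unsigned wraparound rejects pointers still inside the stack
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static int overflowed(unsigned long stack_base, unsigned long sp)
{
	return stack_base - sp <= PAGE_SIZE;
}

int main(void)
{
	unsigned long base = 0xc0800000UL;	/* hypothetical stack base */

	printf("%d\n", overflowed(base, base + 0x1000));  /* 0: still inside */
	printf("%d\n", overflowed(base, base - 0x200));   /* 1: ran past base */
	return 0;
}
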
28086@@ -288,7 +305,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28087 conditional_sti(regs);
28088
28089 #ifdef CONFIG_X86_32
28090- if (regs->flags & X86_VM_MASK) {
28091+ if (v8086_mode(regs)) {
28092 local_irq_enable();
28093 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28094 goto exit;
28095@@ -296,18 +313,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28096 #endif
28097
28098 tsk = current;
28099- if (!user_mode(regs)) {
28100+ if (!user_mode_novm(regs)) {
28101 if (fixup_exception(regs))
28102 goto exit;
28103
28104 tsk->thread.error_code = error_code;
28105 tsk->thread.trap_nr = X86_TRAP_GP;
28106 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28107- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28108+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28109+
28110+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28111+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28112+ die("PAX: suspicious general protection fault", regs, error_code);
28113+ else
28114+#endif
28115+
28116 die("general protection fault", regs, error_code);
28117+ }
28118 goto exit;
28119 }
28120
28121+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28122+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28123+ struct mm_struct *mm = tsk->mm;
28124+ unsigned long limit;
28125+
28126+ down_write(&mm->mmap_sem);
28127+ limit = mm->context.user_cs_limit;
28128+ if (limit < TASK_SIZE) {
28129+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28130+ up_write(&mm->mmap_sem);
28131+ return;
28132+ }
28133+ up_write(&mm->mmap_sem);
28134+ }
28135+#endif
28136+
28137 tsk->thread.error_code = error_code;
28138 tsk->thread.trap_nr = X86_TRAP_GP;
28139
28140@@ -481,7 +522,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28141 /* It's safe to allow irq's after DR6 has been saved */
28142 preempt_conditional_sti(regs);
28143
28144- if (regs->flags & X86_VM_MASK) {
28145+ if (v8086_mode(regs)) {
28146 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28147 X86_TRAP_DB);
28148 preempt_conditional_cli(regs);
28149@@ -496,7 +537,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28150 * We already checked v86 mode above, so we can check for kernel mode
28151 * by just checking the CPL of CS.
28152 */
28153- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28154+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28155 tsk->thread.debugreg6 &= ~DR_STEP;
28156 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28157 regs->flags &= ~X86_EFLAGS_TF;
28158@@ -529,7 +570,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28159 return;
28160 conditional_sti(regs);
28161
28162- if (!user_mode_vm(regs))
28163+ if (!user_mode(regs))
28164 {
28165 if (!fixup_exception(regs)) {
28166 task->thread.error_code = error_code;
28167diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28168index ea03031..34a5cdda 100644
28169--- a/arch/x86/kernel/tsc.c
28170+++ b/arch/x86/kernel/tsc.c
28171@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28172 */
28173 smp_wmb();
28174
28175- ACCESS_ONCE(c2n->head) = data;
28176+ ACCESS_ONCE_RW(c2n->head) = data;
28177 }
28178
28179 /*
28180diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28181index 5d1cbfe..2a21feb 100644
28182--- a/arch/x86/kernel/uprobes.c
28183+++ b/arch/x86/kernel/uprobes.c
28184@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28185 int ret = NOTIFY_DONE;
28186
28187 /* We are only interested in userspace traps */
28188- if (regs && !user_mode_vm(regs))
28189+ if (regs && !user_mode(regs))
28190 return NOTIFY_DONE;
28191
28192 switch (val) {
28193@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28194
28195 if (nleft != rasize) {
28196 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28197- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28198+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28199
28200 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28201 }
28202diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28203index b9242ba..50c5edd 100644
28204--- a/arch/x86/kernel/verify_cpu.S
28205+++ b/arch/x86/kernel/verify_cpu.S
28206@@ -20,6 +20,7 @@
28207 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28208 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28209 * arch/x86/kernel/head_32.S: processor startup
28210+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28211 *
28212 * verify_cpu, returns the status of longmode and SSE in register %eax.
28213 * 0: Success 1: Failure
28214diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28215index e8edcf5..27f9344 100644
28216--- a/arch/x86/kernel/vm86_32.c
28217+++ b/arch/x86/kernel/vm86_32.c
28218@@ -44,6 +44,7 @@
28219 #include <linux/ptrace.h>
28220 #include <linux/audit.h>
28221 #include <linux/stddef.h>
28222+#include <linux/grsecurity.h>
28223
28224 #include <asm/uaccess.h>
28225 #include <asm/io.h>
28226@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28227 do_exit(SIGSEGV);
28228 }
28229
28230- tss = &per_cpu(init_tss, get_cpu());
28231+ tss = init_tss + get_cpu();
28232 current->thread.sp0 = current->thread.saved_sp0;
28233 current->thread.sysenter_cs = __KERNEL_CS;
28234 load_sp0(tss, &current->thread);
28235@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28236
28237 if (tsk->thread.saved_sp0)
28238 return -EPERM;
28239+
28240+#ifdef CONFIG_GRKERNSEC_VM86
28241+ if (!capable(CAP_SYS_RAWIO)) {
28242+ gr_handle_vm86();
28243+ return -EPERM;
28244+ }
28245+#endif
28246+
28247 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28248 offsetof(struct kernel_vm86_struct, vm86plus) -
28249 sizeof(info.regs));
28250@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28251 int tmp;
28252 struct vm86plus_struct __user *v86;
28253
28254+#ifdef CONFIG_GRKERNSEC_VM86
28255+ if (!capable(CAP_SYS_RAWIO)) {
28256+ gr_handle_vm86();
28257+ return -EPERM;
28258+ }
28259+#endif
28260+
28261 tsk = current;
28262 switch (cmd) {
28263 case VM86_REQUEST_IRQ:
28264@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28265 tsk->thread.saved_fs = info->regs32->fs;
28266 tsk->thread.saved_gs = get_user_gs(info->regs32);
28267
28268- tss = &per_cpu(init_tss, get_cpu());
28269+ tss = init_tss + get_cpu();
28270 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28271 if (cpu_has_sep)
28272 tsk->thread.sysenter_cs = 0;
28273@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28274 goto cannot_handle;
28275 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28276 goto cannot_handle;
28277- intr_ptr = (unsigned long __user *) (i << 2);
28278+ intr_ptr = (__force unsigned long __user *) (i << 2);
28279 if (get_user(segoffs, intr_ptr))
28280 goto cannot_handle;
28281 if ((segoffs >> 16) == BIOSSEG)
28282diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28283index 49edf2d..c0d1362 100644
28284--- a/arch/x86/kernel/vmlinux.lds.S
28285+++ b/arch/x86/kernel/vmlinux.lds.S
28286@@ -26,6 +26,13 @@
28287 #include <asm/page_types.h>
28288 #include <asm/cache.h>
28289 #include <asm/boot.h>
28290+#include <asm/segment.h>
28291+
28292+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28293+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28294+#else
28295+#define __KERNEL_TEXT_OFFSET 0
28296+#endif
28297
28298 #undef i386 /* in case the preprocessor is a 32bit one */
28299
28300@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28301
28302 PHDRS {
28303 text PT_LOAD FLAGS(5); /* R_E */
28304+#ifdef CONFIG_X86_32
28305+ module PT_LOAD FLAGS(5); /* R_E */
28306+#endif
28307+#ifdef CONFIG_XEN
28308+ rodata PT_LOAD FLAGS(5); /* R_E */
28309+#else
28310+ rodata PT_LOAD FLAGS(4); /* R__ */
28311+#endif
28312 data PT_LOAD FLAGS(6); /* RW_ */
28313-#ifdef CONFIG_X86_64
28314+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28315 #ifdef CONFIG_SMP
28316 percpu PT_LOAD FLAGS(6); /* RW_ */
28317 #endif
28318+ text.init PT_LOAD FLAGS(5); /* R_E */
28319+ text.exit PT_LOAD FLAGS(5); /* R_E */
28320 init PT_LOAD FLAGS(7); /* RWE */
28321-#endif
28322 note PT_NOTE FLAGS(0); /* ___ */
28323 }
28324
28325 SECTIONS
28326 {
28327 #ifdef CONFIG_X86_32
28328- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28329- phys_startup_32 = startup_32 - LOAD_OFFSET;
28330+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28331 #else
28332- . = __START_KERNEL;
28333- phys_startup_64 = startup_64 - LOAD_OFFSET;
28334+ . = __START_KERNEL;
28335 #endif
28336
28337 /* Text and read-only data */
28338- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28339- _text = .;
28340+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28341 /* bootstrapping code */
28342+#ifdef CONFIG_X86_32
28343+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28344+#else
28345+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28346+#endif
28347+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28348+ _text = .;
28349 HEAD_TEXT
28350 . = ALIGN(8);
28351 _stext = .;
28352@@ -104,13 +124,47 @@ SECTIONS
28353 IRQENTRY_TEXT
28354 *(.fixup)
28355 *(.gnu.warning)
28356- /* End of text section */
28357- _etext = .;
28358 } :text = 0x9090
28359
28360- NOTES :text :note
28361+ . += __KERNEL_TEXT_OFFSET;
28362
28363- EXCEPTION_TABLE(16) :text = 0x9090
28364+#ifdef CONFIG_X86_32
28365+ . = ALIGN(PAGE_SIZE);
28366+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28367+
28368+#ifdef CONFIG_PAX_KERNEXEC
28369+ MODULES_EXEC_VADDR = .;
28370+ BYTE(0)
28371+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28372+ . = ALIGN(HPAGE_SIZE) - 1;
28373+ MODULES_EXEC_END = .;
28374+#endif
28375+
28376+ } :module
28377+#endif
28378+
28379+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28380+ /* End of text section */
28381+ BYTE(0)
28382+ _etext = . - __KERNEL_TEXT_OFFSET;
28383+ }
28384+
28385+#ifdef CONFIG_X86_32
28386+ . = ALIGN(PAGE_SIZE);
28387+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28388+ . = ALIGN(PAGE_SIZE);
28389+ *(.empty_zero_page)
28390+ *(.initial_pg_fixmap)
28391+ *(.initial_pg_pmd)
28392+ *(.initial_page_table)
28393+ *(.swapper_pg_dir)
28394+ } :rodata
28395+#endif
28396+
28397+ . = ALIGN(PAGE_SIZE);
28398+ NOTES :rodata :note
28399+
28400+ EXCEPTION_TABLE(16) :rodata
28401
28402 #if defined(CONFIG_DEBUG_RODATA)
28403 /* .text should occupy whole number of pages */
28404@@ -122,16 +176,20 @@ SECTIONS
28405
28406 /* Data */
28407 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28408+
28409+#ifdef CONFIG_PAX_KERNEXEC
28410+ . = ALIGN(HPAGE_SIZE);
28411+#else
28412+ . = ALIGN(PAGE_SIZE);
28413+#endif
28414+
28415 /* Start of data section */
28416 _sdata = .;
28417
28418 /* init_task */
28419 INIT_TASK_DATA(THREAD_SIZE)
28420
28421-#ifdef CONFIG_X86_32
28422- /* 32 bit has nosave before _edata */
28423 NOSAVE_DATA
28424-#endif
28425
28426 PAGE_ALIGNED_DATA(PAGE_SIZE)
28427
28428@@ -174,12 +232,19 @@ SECTIONS
28429 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28430
28431 /* Init code and data - will be freed after init */
28432- . = ALIGN(PAGE_SIZE);
28433 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28434+ BYTE(0)
28435+
28436+#ifdef CONFIG_PAX_KERNEXEC
28437+ . = ALIGN(HPAGE_SIZE);
28438+#else
28439+ . = ALIGN(PAGE_SIZE);
28440+#endif
28441+
28442 __init_begin = .; /* paired with __init_end */
28443- }
28444+ } :init.begin
28445
28446-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28447+#ifdef CONFIG_SMP
28448 /*
28449 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28450 * output PHDR, so the next output section - .init.text - should
28451@@ -188,12 +253,27 @@ SECTIONS
28452 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
28453 #endif
28454
28455- INIT_TEXT_SECTION(PAGE_SIZE)
28456-#ifdef CONFIG_X86_64
28457- :init
28458-#endif
28459+ . = ALIGN(PAGE_SIZE);
28460+ init_begin = .;
28461+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28462+ VMLINUX_SYMBOL(_sinittext) = .;
28463+ INIT_TEXT
28464+ VMLINUX_SYMBOL(_einittext) = .;
28465+ . = ALIGN(PAGE_SIZE);
28466+ } :text.init
28467
28468- INIT_DATA_SECTION(16)
28469+ /*
28470+ * .exit.text is discarded at runtime, not link time, to deal with
28471+ * references from .altinstructions and .eh_frame
28472+ */
28473+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28474+ EXIT_TEXT
28475+ . = ALIGN(16);
28476+ } :text.exit
28477+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28478+
28479+ . = ALIGN(PAGE_SIZE);
28480+ INIT_DATA_SECTION(16) :init
28481
28482 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28483 __x86_cpu_dev_start = .;
28484@@ -264,19 +344,12 @@ SECTIONS
28485 }
28486
28487 . = ALIGN(8);
28488- /*
28489- * .exit.text is discard at runtime, not link time, to deal with
28490- * references from .altinstructions and .eh_frame
28491- */
28492- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28493- EXIT_TEXT
28494- }
28495
28496 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28497 EXIT_DATA
28498 }
28499
28500-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28501+#ifndef CONFIG_SMP
28502 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28503 #endif
28504
28505@@ -295,16 +368,10 @@ SECTIONS
28506 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28507 __smp_locks = .;
28508 *(.smp_locks)
28509- . = ALIGN(PAGE_SIZE);
28510 __smp_locks_end = .;
28511+ . = ALIGN(PAGE_SIZE);
28512 }
28513
28514-#ifdef CONFIG_X86_64
28515- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28516- NOSAVE_DATA
28517- }
28518-#endif
28519-
28520 /* BSS */
28521 . = ALIGN(PAGE_SIZE);
28522 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28523@@ -320,6 +387,7 @@ SECTIONS
28524 __brk_base = .;
28525 . += 64 * 1024; /* 64k alignment slop space */
28526 *(.brk_reservation) /* areas brk users have reserved */
28527+ . = ALIGN(HPAGE_SIZE);
28528 __brk_limit = .;
28529 }
28530
28531@@ -346,13 +414,12 @@ SECTIONS
28532 * for the boot processor.
28533 */
28534 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28535-INIT_PER_CPU(gdt_page);
28536 INIT_PER_CPU(irq_stack_union);
28537
28538 /*
28539 * Build-time check on the image size:
28540 */
28541-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28542+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28543 "kernel image bigger than KERNEL_IMAGE_SIZE");
28544
28545 #ifdef CONFIG_SMP
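
The linker-script hunks above link the kernel text at `. - __KERNEL_TEXT_OFFSET` while it is still loaded at the usual address, so converting between the linked and loaded views is constant-offset arithmetic; later hunks in this patch (insn.c, vmx.c) use `ktla_ktva()` for exactly that. A minimal userspace sketch of the idea, with an illustrative offset value and the sign as assumed here (the real `__KERNEL_TEXT_OFFSET` is config-dependent):

#include <stdio.h>

/* Illustrative only: stands in for the config-dependent __KERNEL_TEXT_OFFSET. */
#define KERNEL_TEXT_OFFSET 0x01000000UL

/* kernel-text linked address -> loaded (virtual) address, and back */
static unsigned long ktla_ktva(unsigned long addr) { return addr + KERNEL_TEXT_OFFSET; }
static unsigned long ktva_ktla(unsigned long addr) { return addr - KERNEL_TEXT_OFFSET; }

int main(void)
{
	unsigned long linked = 0xc0100000UL; /* hypothetical linked .text address */

	printf("linked %#lx -> loaded %#lx -> back %#lx\n",
	       linked, ktla_ktva(linked), ktva_ktla(ktla_ktva(linked)));
	return 0;
}
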
28546diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28547index ea5b570..960e4da 100644
28548--- a/arch/x86/kernel/vsyscall_64.c
28549+++ b/arch/x86/kernel/vsyscall_64.c
28550@@ -54,15 +54,13 @@
28551
28552 DEFINE_VVAR(int, vgetcpu_mode);
28553
28554-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28555+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28556
28557 static int __init vsyscall_setup(char *str)
28558 {
28559 if (str) {
28560 if (!strcmp("emulate", str))
28561 vsyscall_mode = EMULATE;
28562- else if (!strcmp("native", str))
28563- vsyscall_mode = NATIVE;
28564 else if (!strcmp("none", str))
28565 vsyscall_mode = NONE;
28566 else
28567@@ -279,8 +277,7 @@ do_ret:
28568 return true;
28569
28570 sigsegv:
28571- force_sig(SIGSEGV, current);
28572- return true;
28573+ do_group_exit(SIGKILL);
28574 }
28575
28576 /*
28577@@ -331,10 +328,7 @@ void __init map_vsyscall(void)
28578 extern char __vsyscall_page;
28579 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28580
28581- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28582- vsyscall_mode == NATIVE
28583- ? PAGE_KERNEL_VSYSCALL
28584- : PAGE_KERNEL_VVAR);
28585+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28586 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28587 (unsigned long)VSYSCALL_ADDR);
28588 }
28589diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28590index 04068192..4d75aa6 100644
28591--- a/arch/x86/kernel/x8664_ksyms_64.c
28592+++ b/arch/x86/kernel/x8664_ksyms_64.c
28593@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28594 EXPORT_SYMBOL(copy_user_generic_unrolled);
28595 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28596 EXPORT_SYMBOL(__copy_user_nocache);
28597-EXPORT_SYMBOL(_copy_from_user);
28598-EXPORT_SYMBOL(_copy_to_user);
28599
28600 EXPORT_SYMBOL(copy_page);
28601 EXPORT_SYMBOL(clear_page);
28602@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28603 EXPORT_SYMBOL(___preempt_schedule_context);
28604 #endif
28605 #endif
28606+
28607+#ifdef CONFIG_PAX_PER_CPU_PGD
28608+EXPORT_SYMBOL(cpu_pgd);
28609+#endif
28610diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28611index e48b674..a451dd9 100644
28612--- a/arch/x86/kernel/x86_init.c
28613+++ b/arch/x86/kernel/x86_init.c
28614@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28615 static void default_nmi_init(void) { };
28616 static int default_i8042_detect(void) { return 1; };
28617
28618-struct x86_platform_ops x86_platform = {
28619+struct x86_platform_ops x86_platform __read_only = {
28620 .calibrate_tsc = native_calibrate_tsc,
28621 .get_wallclock = mach_get_cmos_time,
28622 .set_wallclock = mach_set_rtc_mmss,
28623@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28624 EXPORT_SYMBOL_GPL(x86_platform);
28625
28626 #if defined(CONFIG_PCI_MSI)
28627-struct x86_msi_ops x86_msi = {
28628+struct x86_msi_ops x86_msi __read_only = {
28629 .setup_msi_irqs = native_setup_msi_irqs,
28630 .compose_msi_msg = native_compose_msi_msg,
28631 .teardown_msi_irq = native_teardown_msi_irq,
28632@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
28633 }
28634 #endif
28635
28636-struct x86_io_apic_ops x86_io_apic_ops = {
28637+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28638 .init = native_io_apic_init_mappings,
28639 .read = native_io_apic_read,
28640 .write = native_io_apic_write,
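
The `__read_only` annotations above pair with the `pax_open_kernel()`/`pax_close_kernel()` brackets used in the svm.c and vmx.c hunks below: ops tables live on read-only pages, and the rare legitimate writer briefly lifts write protection around a single store. A userspace analogue, with `mprotect()` standing in for the kernel-side mechanism (a sketch, not PaX's implementation):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* A page-aligned blob playing the role of a __read_only ops table. */
static char table[4096] __attribute__((aligned(4096))) = "native_handler";

int main(void)
{
	mprotect(table, sizeof(table), PROT_READ);	/* "__read_only" */

	/* one-off update, bracketed like pax_open_kernel()/pax_close_kernel() */
	if (mprotect(table, sizeof(table), PROT_READ | PROT_WRITE) == 0) {
		strcpy(table, "patched_handler");
		mprotect(table, sizeof(table), PROT_READ);
	}

	puts(table);	/* any later stray write would now fault */
	return 0;
}
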
28641diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28642index a4b451c..8dfe1ad 100644
28643--- a/arch/x86/kernel/xsave.c
28644+++ b/arch/x86/kernel/xsave.c
28645@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28646
28647 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28648 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28649- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28650+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28651
28652 if (!use_xsave())
28653 return err;
28654
28655- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28656+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28657
28658 /*
28659 * Read the xstate_bv which we copied (directly from the cpu or
28660 * from the state in task struct) to the user buffers.
28661 */
28662- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28663+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28664
28665 /*
28666 * For legacy compatible, we always set FP/SSE bits in the bit
28667@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28668 */
28669 xstate_bv |= XSTATE_FPSSE;
28670
28671- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28672+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28673
28674 return err;
28675 }
28676@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28677 {
28678 int err;
28679
28680+ buf = (struct xsave_struct __user *)____m(buf);
28681 if (use_xsave())
28682 err = xsave_user(buf);
28683 else if (use_fxsr())
28684@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28685 */
28686 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28687 {
28688+ buf = (void __user *)____m(buf);
28689 if (use_xsave()) {
28690 if ((unsigned long)buf % 64 || fx_only) {
28691 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28692diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28693index 38a0afe..94421a9 100644
28694--- a/arch/x86/kvm/cpuid.c
28695+++ b/arch/x86/kvm/cpuid.c
28696@@ -166,15 +166,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28697 struct kvm_cpuid2 *cpuid,
28698 struct kvm_cpuid_entry2 __user *entries)
28699 {
28700- int r;
28701+ int r, i;
28702
28703 r = -E2BIG;
28704 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28705 goto out;
28706 r = -EFAULT;
28707- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28708- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28709+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28710 goto out;
28711+ for (i = 0; i < cpuid->nent; ++i) {
28712+ struct kvm_cpuid_entry2 cpuid_entry;
28713+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28714+ goto out;
28715+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28716+ }
28717 vcpu->arch.cpuid_nent = cpuid->nent;
28718 kvm_apic_set_version(vcpu);
28719 kvm_x86_ops->cpuid_update(vcpu);
28720@@ -189,15 +194,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28721 struct kvm_cpuid2 *cpuid,
28722 struct kvm_cpuid_entry2 __user *entries)
28723 {
28724- int r;
28725+ int r, i;
28726
28727 r = -E2BIG;
28728 if (cpuid->nent < vcpu->arch.cpuid_nent)
28729 goto out;
28730 r = -EFAULT;
28731- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28732- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28733+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28734 goto out;
28735+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28736+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28737+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28738+ goto out;
28739+ }
28740 return 0;
28741
28742 out:
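
The cpuid.c change above replaces one large `copy_from_user()`/`copy_to_user()` with a single `access_ok()` range check plus a per-entry `__copy_from_user()`/`__copy_to_user()` loop, so each individual copy has a small compile-time-constant size that bounds checking can verify against the destination object. A kernel-style sketch of the same pattern for a hypothetical `struct item` array (names are illustrative, not from the patch; as in the hunk, the caller is assumed to have bounded `nent` already):

#include <linux/types.h>
#include <linux/uaccess.h>

struct item { u32 a, b; };

/* Validate the whole user range once, then copy fixed-size elements:
 * every __copy_from_user() moves sizeof(struct item) bytes into a
 * stack-local before it is committed to the destination array. */
static int copy_items_from_user(struct item *dst,
				const struct item __user *src,
				unsigned int nent)
{
	unsigned int i;

	if (!access_ok(VERIFY_READ, src, nent * sizeof(struct item)))
		return -EFAULT;
	for (i = 0; i < nent; i++) {
		struct item tmp;

		if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
			return -EFAULT;
		dst[i] = tmp;
	}
	return 0;
}
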
28743diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28744index 0069118..c28ec0a 100644
28745--- a/arch/x86/kvm/lapic.c
28746+++ b/arch/x86/kvm/lapic.c
28747@@ -55,7 +55,7 @@
28748 #define APIC_BUS_CYCLE_NS 1
28749
28750 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28751-#define apic_debug(fmt, arg...)
28752+#define apic_debug(fmt, arg...) do {} while (0)
28753
28754 #define APIC_LVT_NUM 6
28755 /* 14 is the version for Xeon and Pentium 8.4.8*/
28756diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28757index 4107765..d9eb358 100644
28758--- a/arch/x86/kvm/paging_tmpl.h
28759+++ b/arch/x86/kvm/paging_tmpl.h
28760@@ -331,7 +331,7 @@ retry_walk:
28761 if (unlikely(kvm_is_error_hva(host_addr)))
28762 goto error;
28763
28764- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28765+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28766 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28767 goto error;
28768 walker->ptep_user[walker->level - 1] = ptep_user;
28769diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28770index b5e994a..35b5866 100644
28771--- a/arch/x86/kvm/svm.c
28772+++ b/arch/x86/kvm/svm.c
28773@@ -3541,7 +3541,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28774 int cpu = raw_smp_processor_id();
28775
28776 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28777+
28778+ pax_open_kernel();
28779 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28780+ pax_close_kernel();
28781+
28782 load_TR_desc();
28783 }
28784
28785@@ -3942,6 +3946,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28786 #endif
28787 #endif
28788
28789+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28790+ __set_fs(current_thread_info()->addr_limit);
28791+#endif
28792+
28793 reload_tss(vcpu);
28794
28795 local_irq_disable();
28796diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28797index 801332e..eeff1cc 100644
28798--- a/arch/x86/kvm/vmx.c
28799+++ b/arch/x86/kvm/vmx.c
28800@@ -1339,12 +1339,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28801 #endif
28802 }
28803
28804-static void vmcs_clear_bits(unsigned long field, u32 mask)
28805+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28806 {
28807 vmcs_writel(field, vmcs_readl(field) & ~mask);
28808 }
28809
28810-static void vmcs_set_bits(unsigned long field, u32 mask)
28811+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28812 {
28813 vmcs_writel(field, vmcs_readl(field) | mask);
28814 }
28815@@ -1604,7 +1604,11 @@ static void reload_tss(void)
28816 struct desc_struct *descs;
28817
28818 descs = (void *)gdt->address;
28819+
28820+ pax_open_kernel();
28821 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28822+ pax_close_kernel();
28823+
28824 load_TR_desc();
28825 }
28826
28827@@ -1832,6 +1836,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28828 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28829 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28830
28831+#ifdef CONFIG_PAX_PER_CPU_PGD
28832+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28833+#endif
28834+
28835 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28836 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28837 vmx->loaded_vmcs->cpu = cpu;
28838@@ -2121,7 +2129,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28839 * reads and returns guest's timestamp counter "register"
28840 * guest_tsc = host_tsc + tsc_offset -- 21.3
28841 */
28842-static u64 guest_read_tsc(void)
28843+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28844 {
28845 u64 host_tsc, tsc_offset;
28846
28847@@ -3093,8 +3101,11 @@ static __init int hardware_setup(void)
28848 if (!cpu_has_vmx_flexpriority())
28849 flexpriority_enabled = 0;
28850
28851- if (!cpu_has_vmx_tpr_shadow())
28852- kvm_x86_ops->update_cr8_intercept = NULL;
28853+ if (!cpu_has_vmx_tpr_shadow()) {
28854+ pax_open_kernel();
28855+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28856+ pax_close_kernel();
28857+ }
28858
28859 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28860 kvm_disable_largepages();
28861@@ -3105,13 +3116,15 @@ static __init int hardware_setup(void)
28862 if (!cpu_has_vmx_apicv())
28863 enable_apicv = 0;
28864
28865+ pax_open_kernel();
28866 if (enable_apicv)
28867- kvm_x86_ops->update_cr8_intercept = NULL;
28868+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28869 else {
28870- kvm_x86_ops->hwapic_irr_update = NULL;
28871- kvm_x86_ops->deliver_posted_interrupt = NULL;
28872- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28873+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28874+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28875+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28876 }
28877+ pax_close_kernel();
28878
28879 if (nested)
28880 nested_vmx_setup_ctls_msrs();
28881@@ -4221,7 +4234,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28882
28883 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28884 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
28885+
28886+#ifndef CONFIG_PAX_PER_CPU_PGD
28887 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28888+#endif
28889
28890 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
28891 #ifdef CONFIG_X86_64
28892@@ -4243,7 +4259,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28893 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28894 vmx->host_idt_base = dt.address;
28895
28896- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28897+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28898
28899 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28900 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28901@@ -7413,6 +7429,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28902 "jmp 2f \n\t"
28903 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28904 "2: "
28905+
28906+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28907+ "ljmp %[cs],$3f\n\t"
28908+ "3: "
28909+#endif
28910+
28911 /* Save guest registers, load host registers, keep flags */
28912 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28913 "pop %0 \n\t"
28914@@ -7465,6 +7487,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28915 #endif
28916 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28917 [wordsize]"i"(sizeof(ulong))
28918+
28919+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28920+ ,[cs]"i"(__KERNEL_CS)
28921+#endif
28922+
28923 : "cc", "memory"
28924 #ifdef CONFIG_X86_64
28925 , "rax", "rbx", "rdi", "rsi"
28926@@ -7478,7 +7505,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28927 if (debugctlmsr)
28928 update_debugctlmsr(debugctlmsr);
28929
28930-#ifndef CONFIG_X86_64
28931+#ifdef CONFIG_X86_32
28932 /*
28933 * The sysexit path does not restore ds/es, so we must set them to
28934 * a reasonable value ourselves.
28935@@ -7487,8 +7514,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28936 * may be executed in interrupt context, which saves and restore segments
28937 * around it, nullifying its effect.
28938 */
28939- loadsegment(ds, __USER_DS);
28940- loadsegment(es, __USER_DS);
28941+ loadsegment(ds, __KERNEL_DS);
28942+ loadsegment(es, __KERNEL_DS);
28943+ loadsegment(ss, __KERNEL_DS);
28944+
28945+#ifdef CONFIG_PAX_KERNEXEC
28946+ loadsegment(fs, __KERNEL_PERCPU);
28947+#endif
28948+
28949+#ifdef CONFIG_PAX_MEMORY_UDEREF
28950+ __set_fs(current_thread_info()->addr_limit);
28951+#endif
28952+
28953 #endif
28954
28955 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
28956diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28957index ef432f8..a630659 100644
28958--- a/arch/x86/kvm/x86.c
28959+++ b/arch/x86/kvm/x86.c
28960@@ -1808,8 +1808,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28961 {
28962 struct kvm *kvm = vcpu->kvm;
28963 int lm = is_long_mode(vcpu);
28964- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28965- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28966+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28967+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28968 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28969 : kvm->arch.xen_hvm_config.blob_size_32;
28970 u32 page_num = data & ~PAGE_MASK;
28971@@ -2729,6 +2729,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28972 if (n < msr_list.nmsrs)
28973 goto out;
28974 r = -EFAULT;
28975+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28976+ goto out;
28977 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28978 num_msrs_to_save * sizeof(u32)))
28979 goto out;
28980@@ -5567,7 +5569,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28981 };
28982 #endif
28983
28984-int kvm_arch_init(void *opaque)
28985+int kvm_arch_init(const void *opaque)
28986 {
28987 int r;
28988 struct kvm_x86_ops *ops = opaque;
28989diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28990index aae9413..d11e829 100644
28991--- a/arch/x86/lguest/boot.c
28992+++ b/arch/x86/lguest/boot.c
28993@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28994 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28995 * Launcher to reboot us.
28996 */
28997-static void lguest_restart(char *reason)
28998+static __noreturn void lguest_restart(char *reason)
28999 {
29000 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29001+ BUG();
29002 }
29003
29004 /*G:050
29005diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29006index 00933d5..3a64af9 100644
29007--- a/arch/x86/lib/atomic64_386_32.S
29008+++ b/arch/x86/lib/atomic64_386_32.S
29009@@ -48,6 +48,10 @@ BEGIN(read)
29010 movl (v), %eax
29011 movl 4(v), %edx
29012 RET_ENDP
29013+BEGIN(read_unchecked)
29014+ movl (v), %eax
29015+ movl 4(v), %edx
29016+RET_ENDP
29017 #undef v
29018
29019 #define v %esi
29020@@ -55,6 +59,10 @@ BEGIN(set)
29021 movl %ebx, (v)
29022 movl %ecx, 4(v)
29023 RET_ENDP
29024+BEGIN(set_unchecked)
29025+ movl %ebx, (v)
29026+ movl %ecx, 4(v)
29027+RET_ENDP
29028 #undef v
29029
29030 #define v %esi
29031@@ -70,6 +78,20 @@ RET_ENDP
29032 BEGIN(add)
29033 addl %eax, (v)
29034 adcl %edx, 4(v)
29035+
29036+#ifdef CONFIG_PAX_REFCOUNT
29037+ jno 0f
29038+ subl %eax, (v)
29039+ sbbl %edx, 4(v)
29040+ int $4
29041+0:
29042+ _ASM_EXTABLE(0b, 0b)
29043+#endif
29044+
29045+RET_ENDP
29046+BEGIN(add_unchecked)
29047+ addl %eax, (v)
29048+ adcl %edx, 4(v)
29049 RET_ENDP
29050 #undef v
29051
29052@@ -77,6 +99,24 @@ RET_ENDP
29053 BEGIN(add_return)
29054 addl (v), %eax
29055 adcl 4(v), %edx
29056+
29057+#ifdef CONFIG_PAX_REFCOUNT
29058+ into
29059+1234:
29060+ _ASM_EXTABLE(1234b, 2f)
29061+#endif
29062+
29063+ movl %eax, (v)
29064+ movl %edx, 4(v)
29065+
29066+#ifdef CONFIG_PAX_REFCOUNT
29067+2:
29068+#endif
29069+
29070+RET_ENDP
29071+BEGIN(add_return_unchecked)
29072+ addl (v), %eax
29073+ adcl 4(v), %edx
29074 movl %eax, (v)
29075 movl %edx, 4(v)
29076 RET_ENDP
29077@@ -86,6 +126,20 @@ RET_ENDP
29078 BEGIN(sub)
29079 subl %eax, (v)
29080 sbbl %edx, 4(v)
29081+
29082+#ifdef CONFIG_PAX_REFCOUNT
29083+ jno 0f
29084+ addl %eax, (v)
29085+ adcl %edx, 4(v)
29086+ int $4
29087+0:
29088+ _ASM_EXTABLE(0b, 0b)
29089+#endif
29090+
29091+RET_ENDP
29092+BEGIN(sub_unchecked)
29093+ subl %eax, (v)
29094+ sbbl %edx, 4(v)
29095 RET_ENDP
29096 #undef v
29097
29098@@ -96,6 +150,27 @@ BEGIN(sub_return)
29099 sbbl $0, %edx
29100 addl (v), %eax
29101 adcl 4(v), %edx
29102+
29103+#ifdef CONFIG_PAX_REFCOUNT
29104+ into
29105+1234:
29106+ _ASM_EXTABLE(1234b, 2f)
29107+#endif
29108+
29109+ movl %eax, (v)
29110+ movl %edx, 4(v)
29111+
29112+#ifdef CONFIG_PAX_REFCOUNT
29113+2:
29114+#endif
29115+
29116+RET_ENDP
29117+BEGIN(sub_return_unchecked)
29118+ negl %edx
29119+ negl %eax
29120+ sbbl $0, %edx
29121+ addl (v), %eax
29122+ adcl 4(v), %edx
29123 movl %eax, (v)
29124 movl %edx, 4(v)
29125 RET_ENDP
29126@@ -105,6 +180,20 @@ RET_ENDP
29127 BEGIN(inc)
29128 addl $1, (v)
29129 adcl $0, 4(v)
29130+
29131+#ifdef CONFIG_PAX_REFCOUNT
29132+ jno 0f
29133+ subl $1, (v)
29134+ sbbl $0, 4(v)
29135+ int $4
29136+0:
29137+ _ASM_EXTABLE(0b, 0b)
29138+#endif
29139+
29140+RET_ENDP
29141+BEGIN(inc_unchecked)
29142+ addl $1, (v)
29143+ adcl $0, 4(v)
29144 RET_ENDP
29145 #undef v
29146
29147@@ -114,6 +203,26 @@ BEGIN(inc_return)
29148 movl 4(v), %edx
29149 addl $1, %eax
29150 adcl $0, %edx
29151+
29152+#ifdef CONFIG_PAX_REFCOUNT
29153+ into
29154+1234:
29155+ _ASM_EXTABLE(1234b, 2f)
29156+#endif
29157+
29158+ movl %eax, (v)
29159+ movl %edx, 4(v)
29160+
29161+#ifdef CONFIG_PAX_REFCOUNT
29162+2:
29163+#endif
29164+
29165+RET_ENDP
29166+BEGIN(inc_return_unchecked)
29167+ movl (v), %eax
29168+ movl 4(v), %edx
29169+ addl $1, %eax
29170+ adcl $0, %edx
29171 movl %eax, (v)
29172 movl %edx, 4(v)
29173 RET_ENDP
29174@@ -123,6 +232,20 @@ RET_ENDP
29175 BEGIN(dec)
29176 subl $1, (v)
29177 sbbl $0, 4(v)
29178+
29179+#ifdef CONFIG_PAX_REFCOUNT
29180+ jno 0f
29181+ addl $1, (v)
29182+ adcl $0, 4(v)
29183+ int $4
29184+0:
29185+ _ASM_EXTABLE(0b, 0b)
29186+#endif
29187+
29188+RET_ENDP
29189+BEGIN(dec_unchecked)
29190+ subl $1, (v)
29191+ sbbl $0, 4(v)
29192 RET_ENDP
29193 #undef v
29194
29195@@ -132,6 +255,26 @@ BEGIN(dec_return)
29196 movl 4(v), %edx
29197 subl $1, %eax
29198 sbbl $0, %edx
29199+
29200+#ifdef CONFIG_PAX_REFCOUNT
29201+ into
29202+1234:
29203+ _ASM_EXTABLE(1234b, 2f)
29204+#endif
29205+
29206+ movl %eax, (v)
29207+ movl %edx, 4(v)
29208+
29209+#ifdef CONFIG_PAX_REFCOUNT
29210+2:
29211+#endif
29212+
29213+RET_ENDP
29214+BEGIN(dec_return_unchecked)
29215+ movl (v), %eax
29216+ movl 4(v), %edx
29217+ subl $1, %eax
29218+ sbbl $0, %edx
29219 movl %eax, (v)
29220 movl %edx, 4(v)
29221 RET_ENDP
29222@@ -143,6 +286,13 @@ BEGIN(add_unless)
29223 adcl %edx, %edi
29224 addl (v), %eax
29225 adcl 4(v), %edx
29226+
29227+#ifdef CONFIG_PAX_REFCOUNT
29228+ into
29229+1234:
29230+ _ASM_EXTABLE(1234b, 2f)
29231+#endif
29232+
29233 cmpl %eax, %ecx
29234 je 3f
29235 1:
29236@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29237 1:
29238 addl $1, %eax
29239 adcl $0, %edx
29240+
29241+#ifdef CONFIG_PAX_REFCOUNT
29242+ into
29243+1234:
29244+ _ASM_EXTABLE(1234b, 2f)
29245+#endif
29246+
29247 movl %eax, (v)
29248 movl %edx, 4(v)
29249 movl $1, %eax
29250@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29251 movl 4(v), %edx
29252 subl $1, %eax
29253 sbbl $0, %edx
29254+
29255+#ifdef CONFIG_PAX_REFCOUNT
29256+ into
29257+1234:
29258+ _ASM_EXTABLE(1234b, 1f)
29259+#endif
29260+
29261 js 1f
29262 movl %eax, (v)
29263 movl %edx, 4(v)
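
The CONFIG_PAX_REFCOUNT blocks added above detect signed overflow of the 64-bit counter right after the arithmetic: `jno`/`into` tests the overflow flag, the update is undone (or the committing store skipped via the exception-table entry), and `int $4` raises the overflow trap. The same detect-and-refuse semantics in portable C, as a runnable sketch using the GCC/Clang `__builtin_add_overflow` rather than PaX's trap path:

#include <stdio.h>
#include <stdint.h>

/* Add i to *v, but refuse to commit a result that signed-overflowed;
 * the kernel version traps via int $4 instead of returning an error. */
static int atomic64_add_checked(int64_t *v, int64_t i)
{
	int64_t res;

	if (__builtin_add_overflow(*v, i, &res))
		return -1;	/* overflow detected: counter left pinned */
	*v = res;
	return 0;
}

int main(void)
{
	int64_t v = INT64_MAX - 1;

	printf("add 1: %d, v=%lld\n", atomic64_add_checked(&v, 1), (long long)v);
	printf("add 1: %d, v=%lld\n", atomic64_add_checked(&v, 1), (long long)v);
	return 0;
}
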
29264diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29265index f5cc9eb..51fa319 100644
29266--- a/arch/x86/lib/atomic64_cx8_32.S
29267+++ b/arch/x86/lib/atomic64_cx8_32.S
29268@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29269 CFI_STARTPROC
29270
29271 read64 %ecx
29272+ pax_force_retaddr
29273 ret
29274 CFI_ENDPROC
29275 ENDPROC(atomic64_read_cx8)
29276
29277+ENTRY(atomic64_read_unchecked_cx8)
29278+ CFI_STARTPROC
29279+
29280+ read64 %ecx
29281+ pax_force_retaddr
29282+ ret
29283+ CFI_ENDPROC
29284+ENDPROC(atomic64_read_unchecked_cx8)
29285+
29286 ENTRY(atomic64_set_cx8)
29287 CFI_STARTPROC
29288
29289@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29290 cmpxchg8b (%esi)
29291 jne 1b
29292
29293+ pax_force_retaddr
29294 ret
29295 CFI_ENDPROC
29296 ENDPROC(atomic64_set_cx8)
29297
29298+ENTRY(atomic64_set_unchecked_cx8)
29299+ CFI_STARTPROC
29300+
29301+1:
29302+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29303+ * are atomic on 586 and newer */
29304+ cmpxchg8b (%esi)
29305+ jne 1b
29306+
29307+ pax_force_retaddr
29308+ ret
29309+ CFI_ENDPROC
29310+ENDPROC(atomic64_set_unchecked_cx8)
29311+
29312 ENTRY(atomic64_xchg_cx8)
29313 CFI_STARTPROC
29314
29315@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29316 cmpxchg8b (%esi)
29317 jne 1b
29318
29319+ pax_force_retaddr
29320 ret
29321 CFI_ENDPROC
29322 ENDPROC(atomic64_xchg_cx8)
29323
29324-.macro addsub_return func ins insc
29325-ENTRY(atomic64_\func\()_return_cx8)
29326+.macro addsub_return func ins insc unchecked=""
29327+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29328 CFI_STARTPROC
29329 SAVE ebp
29330 SAVE ebx
29331@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29332 movl %edx, %ecx
29333 \ins\()l %esi, %ebx
29334 \insc\()l %edi, %ecx
29335+
29336+.ifb \unchecked
29337+#ifdef CONFIG_PAX_REFCOUNT
29338+ into
29339+2:
29340+ _ASM_EXTABLE(2b, 3f)
29341+#endif
29342+.endif
29343+
29344 LOCK_PREFIX
29345 cmpxchg8b (%ebp)
29346 jne 1b
29347-
29348-10:
29349 movl %ebx, %eax
29350 movl %ecx, %edx
29351+
29352+.ifb \unchecked
29353+#ifdef CONFIG_PAX_REFCOUNT
29354+3:
29355+#endif
29356+.endif
29357+
29358 RESTORE edi
29359 RESTORE esi
29360 RESTORE ebx
29361 RESTORE ebp
29362+ pax_force_retaddr
29363 ret
29364 CFI_ENDPROC
29365-ENDPROC(atomic64_\func\()_return_cx8)
29366+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29367 .endm
29368
29369 addsub_return add add adc
29370 addsub_return sub sub sbb
29371+addsub_return add add adc _unchecked
29372+addsub_return sub sub sbb _unchecked
29373
29374-.macro incdec_return func ins insc
29375-ENTRY(atomic64_\func\()_return_cx8)
29376+.macro incdec_return func ins insc unchecked=""
29377+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29378 CFI_STARTPROC
29379 SAVE ebx
29380
29381@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29382 movl %edx, %ecx
29383 \ins\()l $1, %ebx
29384 \insc\()l $0, %ecx
29385+
29386+.ifb \unchecked
29387+#ifdef CONFIG_PAX_REFCOUNT
29388+ into
29389+2:
29390+ _ASM_EXTABLE(2b, 3f)
29391+#endif
29392+.endif
29393+
29394 LOCK_PREFIX
29395 cmpxchg8b (%esi)
29396 jne 1b
29397
29398-10:
29399 movl %ebx, %eax
29400 movl %ecx, %edx
29401+
29402+.ifb \unchecked
29403+#ifdef CONFIG_PAX_REFCOUNT
29404+3:
29405+#endif
29406+.endif
29407+
29408 RESTORE ebx
29409+ pax_force_retaddr
29410 ret
29411 CFI_ENDPROC
29412-ENDPROC(atomic64_\func\()_return_cx8)
29413+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29414 .endm
29415
29416 incdec_return inc add adc
29417 incdec_return dec sub sbb
29418+incdec_return inc add adc _unchecked
29419+incdec_return dec sub sbb _unchecked
29420
29421 ENTRY(atomic64_dec_if_positive_cx8)
29422 CFI_STARTPROC
29423@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29424 movl %edx, %ecx
29425 subl $1, %ebx
29426 sbb $0, %ecx
29427+
29428+#ifdef CONFIG_PAX_REFCOUNT
29429+ into
29430+1234:
29431+ _ASM_EXTABLE(1234b, 2f)
29432+#endif
29433+
29434 js 2f
29435 LOCK_PREFIX
29436 cmpxchg8b (%esi)
29437@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29438 movl %ebx, %eax
29439 movl %ecx, %edx
29440 RESTORE ebx
29441+ pax_force_retaddr
29442 ret
29443 CFI_ENDPROC
29444 ENDPROC(atomic64_dec_if_positive_cx8)
29445@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29446 movl %edx, %ecx
29447 addl %ebp, %ebx
29448 adcl %edi, %ecx
29449+
29450+#ifdef CONFIG_PAX_REFCOUNT
29451+ into
29452+1234:
29453+ _ASM_EXTABLE(1234b, 3f)
29454+#endif
29455+
29456 LOCK_PREFIX
29457 cmpxchg8b (%esi)
29458 jne 1b
29459@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29460 CFI_ADJUST_CFA_OFFSET -8
29461 RESTORE ebx
29462 RESTORE ebp
29463+ pax_force_retaddr
29464 ret
29465 4:
29466 cmpl %edx, 4(%esp)
29467@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29468 xorl %ecx, %ecx
29469 addl $1, %ebx
29470 adcl %edx, %ecx
29471+
29472+#ifdef CONFIG_PAX_REFCOUNT
29473+ into
29474+1234:
29475+ _ASM_EXTABLE(1234b, 3f)
29476+#endif
29477+
29478 LOCK_PREFIX
29479 cmpxchg8b (%esi)
29480 jne 1b
29481@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29482 movl $1, %eax
29483 3:
29484 RESTORE ebx
29485+ pax_force_retaddr
29486 ret
29487 CFI_ENDPROC
29488 ENDPROC(atomic64_inc_not_zero_cx8)
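
The `unchecked=""` macro parameter above lets one assembler macro stamp out both the overflow-checked and the `_unchecked` variant of each operation, with `.ifb \unchecked` gating the trap code. A C analogue of that code-generation trick (a sketch; the wrapping add uses unsigned arithmetic to mirror the asm's two's-complement behavior without C-level undefined behavior):

#include <stdio.h>
#include <stdint.h>

/* One macro, two variants: CHECKED selects whether overflow is refused. */
#define DEF_ADD(name, CHECKED)						\
static int64_t atomic64_##name(int64_t *v, int64_t i)			\
{									\
	int64_t res;							\
	if (CHECKED && __builtin_add_overflow(*v, i, &res))		\
		return *v;	/* checked: refuse to wrap */		\
	if (!CHECKED)							\
		res = (int64_t)((uint64_t)*v + (uint64_t)i); /* wraps */\
	*v = res;							\
	return res;							\
}

DEF_ADD(add_return, 1)
DEF_ADD(add_return_unchecked, 0)

int main(void)
{
	int64_t a = INT64_MAX, b = INT64_MAX;

	printf("checked: %lld, unchecked: %lld\n",
	       (long long)atomic64_add_return(&a, 1),
	       (long long)atomic64_add_return_unchecked(&b, 1));
	return 0;
}
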
29489diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29490index e78b8ee..7e173a8 100644
29491--- a/arch/x86/lib/checksum_32.S
29492+++ b/arch/x86/lib/checksum_32.S
29493@@ -29,7 +29,8 @@
29494 #include <asm/dwarf2.h>
29495 #include <asm/errno.h>
29496 #include <asm/asm.h>
29497-
29498+#include <asm/segment.h>
29499+
29500 /*
29501 * computes a partial checksum, e.g. for TCP/UDP fragments
29502 */
29503@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29504
29505 #define ARGBASE 16
29506 #define FP 12
29507-
29508-ENTRY(csum_partial_copy_generic)
29509+
29510+ENTRY(csum_partial_copy_generic_to_user)
29511 CFI_STARTPROC
29512+
29513+#ifdef CONFIG_PAX_MEMORY_UDEREF
29514+ pushl_cfi %gs
29515+ popl_cfi %es
29516+ jmp csum_partial_copy_generic
29517+#endif
29518+
29519+ENTRY(csum_partial_copy_generic_from_user)
29520+
29521+#ifdef CONFIG_PAX_MEMORY_UDEREF
29522+ pushl_cfi %gs
29523+ popl_cfi %ds
29524+#endif
29525+
29526+ENTRY(csum_partial_copy_generic)
29527 subl $4,%esp
29528 CFI_ADJUST_CFA_OFFSET 4
29529 pushl_cfi %edi
29530@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29531 jmp 4f
29532 SRC(1: movw (%esi), %bx )
29533 addl $2, %esi
29534-DST( movw %bx, (%edi) )
29535+DST( movw %bx, %es:(%edi) )
29536 addl $2, %edi
29537 addw %bx, %ax
29538 adcl $0, %eax
29539@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29540 SRC(1: movl (%esi), %ebx )
29541 SRC( movl 4(%esi), %edx )
29542 adcl %ebx, %eax
29543-DST( movl %ebx, (%edi) )
29544+DST( movl %ebx, %es:(%edi) )
29545 adcl %edx, %eax
29546-DST( movl %edx, 4(%edi) )
29547+DST( movl %edx, %es:4(%edi) )
29548
29549 SRC( movl 8(%esi), %ebx )
29550 SRC( movl 12(%esi), %edx )
29551 adcl %ebx, %eax
29552-DST( movl %ebx, 8(%edi) )
29553+DST( movl %ebx, %es:8(%edi) )
29554 adcl %edx, %eax
29555-DST( movl %edx, 12(%edi) )
29556+DST( movl %edx, %es:12(%edi) )
29557
29558 SRC( movl 16(%esi), %ebx )
29559 SRC( movl 20(%esi), %edx )
29560 adcl %ebx, %eax
29561-DST( movl %ebx, 16(%edi) )
29562+DST( movl %ebx, %es:16(%edi) )
29563 adcl %edx, %eax
29564-DST( movl %edx, 20(%edi) )
29565+DST( movl %edx, %es:20(%edi) )
29566
29567 SRC( movl 24(%esi), %ebx )
29568 SRC( movl 28(%esi), %edx )
29569 adcl %ebx, %eax
29570-DST( movl %ebx, 24(%edi) )
29571+DST( movl %ebx, %es:24(%edi) )
29572 adcl %edx, %eax
29573-DST( movl %edx, 28(%edi) )
29574+DST( movl %edx, %es:28(%edi) )
29575
29576 lea 32(%esi), %esi
29577 lea 32(%edi), %edi
29578@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29579 shrl $2, %edx # This clears CF
29580 SRC(3: movl (%esi), %ebx )
29581 adcl %ebx, %eax
29582-DST( movl %ebx, (%edi) )
29583+DST( movl %ebx, %es:(%edi) )
29584 lea 4(%esi), %esi
29585 lea 4(%edi), %edi
29586 dec %edx
29587@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29588 jb 5f
29589 SRC( movw (%esi), %cx )
29590 leal 2(%esi), %esi
29591-DST( movw %cx, (%edi) )
29592+DST( movw %cx, %es:(%edi) )
29593 leal 2(%edi), %edi
29594 je 6f
29595 shll $16,%ecx
29596 SRC(5: movb (%esi), %cl )
29597-DST( movb %cl, (%edi) )
29598+DST( movb %cl, %es:(%edi) )
29599 6: addl %ecx, %eax
29600 adcl $0, %eax
29601 7:
29602@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29603
29604 6001:
29605 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29606- movl $-EFAULT, (%ebx)
29607+ movl $-EFAULT, %ss:(%ebx)
29608
29609 # zero the complete destination - computing the rest
29610 # is too much work
29611@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29612
29613 6002:
29614 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29615- movl $-EFAULT,(%ebx)
29616+ movl $-EFAULT,%ss:(%ebx)
29617 jmp 5000b
29618
29619 .previous
29620
29621+ pushl_cfi %ss
29622+ popl_cfi %ds
29623+ pushl_cfi %ss
29624+ popl_cfi %es
29625 popl_cfi %ebx
29626 CFI_RESTORE ebx
29627 popl_cfi %esi
29628@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29629 popl_cfi %ecx # equivalent to addl $4,%esp
29630 ret
29631 CFI_ENDPROC
29632-ENDPROC(csum_partial_copy_generic)
29633+ENDPROC(csum_partial_copy_generic_to_user)
29634
29635 #else
29636
29637 /* Version for PentiumII/PPro */
29638
29639 #define ROUND1(x) \
29640+ nop; nop; nop; \
29641 SRC(movl x(%esi), %ebx ) ; \
29642 addl %ebx, %eax ; \
29643- DST(movl %ebx, x(%edi) ) ;
29644+ DST(movl %ebx, %es:x(%edi)) ;
29645
29646 #define ROUND(x) \
29647+ nop; nop; nop; \
29648 SRC(movl x(%esi), %ebx ) ; \
29649 adcl %ebx, %eax ; \
29650- DST(movl %ebx, x(%edi) ) ;
29651+ DST(movl %ebx, %es:x(%edi)) ;
29652
29653 #define ARGBASE 12
29654-
29655-ENTRY(csum_partial_copy_generic)
29656+
29657+ENTRY(csum_partial_copy_generic_to_user)
29658 CFI_STARTPROC
29659+
29660+#ifdef CONFIG_PAX_MEMORY_UDEREF
29661+ pushl_cfi %gs
29662+ popl_cfi %es
29663+ jmp csum_partial_copy_generic
29664+#endif
29665+
29666+ENTRY(csum_partial_copy_generic_from_user)
29667+
29668+#ifdef CONFIG_PAX_MEMORY_UDEREF
29669+ pushl_cfi %gs
29670+ popl_cfi %ds
29671+#endif
29672+
29673+ENTRY(csum_partial_copy_generic)
29674 pushl_cfi %ebx
29675 CFI_REL_OFFSET ebx, 0
29676 pushl_cfi %edi
29677@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29678 subl %ebx, %edi
29679 lea -1(%esi),%edx
29680 andl $-32,%edx
29681- lea 3f(%ebx,%ebx), %ebx
29682+ lea 3f(%ebx,%ebx,2), %ebx
29683 testl %esi, %esi
29684 jmp *%ebx
29685 1: addl $64,%esi
29686@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29687 jb 5f
29688 SRC( movw (%esi), %dx )
29689 leal 2(%esi), %esi
29690-DST( movw %dx, (%edi) )
29691+DST( movw %dx, %es:(%edi) )
29692 leal 2(%edi), %edi
29693 je 6f
29694 shll $16,%edx
29695 5:
29696 SRC( movb (%esi), %dl )
29697-DST( movb %dl, (%edi) )
29698+DST( movb %dl, %es:(%edi) )
29699 6: addl %edx, %eax
29700 adcl $0, %eax
29701 7:
29702 .section .fixup, "ax"
29703 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29704- movl $-EFAULT, (%ebx)
29705+ movl $-EFAULT, %ss:(%ebx)
29706 # zero the complete destination (computing the rest is too much work)
29707 movl ARGBASE+8(%esp),%edi # dst
29708 movl ARGBASE+12(%esp),%ecx # len
29709@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29710 rep; stosb
29711 jmp 7b
29712 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29713- movl $-EFAULT, (%ebx)
29714+ movl $-EFAULT, %ss:(%ebx)
29715 jmp 7b
29716 .previous
29717
29718+#ifdef CONFIG_PAX_MEMORY_UDEREF
29719+ pushl_cfi %ss
29720+ popl_cfi %ds
29721+ pushl_cfi %ss
29722+ popl_cfi %es
29723+#endif
29724+
29725 popl_cfi %esi
29726 CFI_RESTORE esi
29727 popl_cfi %edi
29728@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29729 CFI_RESTORE ebx
29730 ret
29731 CFI_ENDPROC
29732-ENDPROC(csum_partial_copy_generic)
29733+ENDPROC(csum_partial_copy_generic_to_user)
29734
29735 #undef ROUND
29736 #undef ROUND1
29737diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29738index f2145cf..cea889d 100644
29739--- a/arch/x86/lib/clear_page_64.S
29740+++ b/arch/x86/lib/clear_page_64.S
29741@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29742 movl $4096/8,%ecx
29743 xorl %eax,%eax
29744 rep stosq
29745+ pax_force_retaddr
29746 ret
29747 CFI_ENDPROC
29748 ENDPROC(clear_page_c)
29749@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29750 movl $4096,%ecx
29751 xorl %eax,%eax
29752 rep stosb
29753+ pax_force_retaddr
29754 ret
29755 CFI_ENDPROC
29756 ENDPROC(clear_page_c_e)
29757@@ -43,6 +45,7 @@ ENTRY(clear_page)
29758 leaq 64(%rdi),%rdi
29759 jnz .Lloop
29760 nop
29761+ pax_force_retaddr
29762 ret
29763 CFI_ENDPROC
29764 .Lclear_page_end:
29765@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29766
29767 #include <asm/cpufeature.h>
29768
29769- .section .altinstr_replacement,"ax"
29770+ .section .altinstr_replacement,"a"
29771 1: .byte 0xeb /* jmp <disp8> */
29772 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29773 2: .byte 0xeb /* jmp <disp8> */
29774diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29775index 1e572c5..2a162cd 100644
29776--- a/arch/x86/lib/cmpxchg16b_emu.S
29777+++ b/arch/x86/lib/cmpxchg16b_emu.S
29778@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
29779
29780 popf
29781 mov $1, %al
29782+ pax_force_retaddr
29783 ret
29784
29785 not_same:
29786 popf
29787 xor %al,%al
29788+ pax_force_retaddr
29789 ret
29790
29791 CFI_ENDPROC
29792diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29793index 176cca6..e0d658e 100644
29794--- a/arch/x86/lib/copy_page_64.S
29795+++ b/arch/x86/lib/copy_page_64.S
29796@@ -9,6 +9,7 @@ copy_page_rep:
29797 CFI_STARTPROC
29798 movl $4096/8, %ecx
29799 rep movsq
29800+ pax_force_retaddr
29801 ret
29802 CFI_ENDPROC
29803 ENDPROC(copy_page_rep)
29804@@ -24,8 +25,8 @@ ENTRY(copy_page)
29805 CFI_ADJUST_CFA_OFFSET 2*8
29806 movq %rbx, (%rsp)
29807 CFI_REL_OFFSET rbx, 0
29808- movq %r12, 1*8(%rsp)
29809- CFI_REL_OFFSET r12, 1*8
29810+ movq %r13, 1*8(%rsp)
29811+ CFI_REL_OFFSET r13, 1*8
29812
29813 movl $(4096/64)-5, %ecx
29814 .p2align 4
29815@@ -38,7 +39,7 @@ ENTRY(copy_page)
29816 movq 0x8*4(%rsi), %r9
29817 movq 0x8*5(%rsi), %r10
29818 movq 0x8*6(%rsi), %r11
29819- movq 0x8*7(%rsi), %r12
29820+ movq 0x8*7(%rsi), %r13
29821
29822 prefetcht0 5*64(%rsi)
29823
29824@@ -49,7 +50,7 @@ ENTRY(copy_page)
29825 movq %r9, 0x8*4(%rdi)
29826 movq %r10, 0x8*5(%rdi)
29827 movq %r11, 0x8*6(%rdi)
29828- movq %r12, 0x8*7(%rdi)
29829+ movq %r13, 0x8*7(%rdi)
29830
29831 leaq 64 (%rsi), %rsi
29832 leaq 64 (%rdi), %rdi
29833@@ -68,7 +69,7 @@ ENTRY(copy_page)
29834 movq 0x8*4(%rsi), %r9
29835 movq 0x8*5(%rsi), %r10
29836 movq 0x8*6(%rsi), %r11
29837- movq 0x8*7(%rsi), %r12
29838+ movq 0x8*7(%rsi), %r13
29839
29840 movq %rax, 0x8*0(%rdi)
29841 movq %rbx, 0x8*1(%rdi)
29842@@ -77,7 +78,7 @@ ENTRY(copy_page)
29843 movq %r9, 0x8*4(%rdi)
29844 movq %r10, 0x8*5(%rdi)
29845 movq %r11, 0x8*6(%rdi)
29846- movq %r12, 0x8*7(%rdi)
29847+ movq %r13, 0x8*7(%rdi)
29848
29849 leaq 64(%rdi), %rdi
29850 leaq 64(%rsi), %rsi
29851@@ -85,10 +86,11 @@ ENTRY(copy_page)
29852
29853 movq (%rsp), %rbx
29854 CFI_RESTORE rbx
29855- movq 1*8(%rsp), %r12
29856- CFI_RESTORE r12
29857+ movq 1*8(%rsp), %r13
29858+ CFI_RESTORE r13
29859 addq $2*8, %rsp
29860 CFI_ADJUST_CFA_OFFSET -2*8
29861+ pax_force_retaddr
29862 ret
29863 .Lcopy_page_end:
29864 CFI_ENDPROC
29865@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29866
29867 #include <asm/cpufeature.h>
29868
29869- .section .altinstr_replacement,"ax"
29870+ .section .altinstr_replacement,"a"
29871 1: .byte 0xeb /* jmp <disp8> */
29872 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29873 2:
29874diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29875index dee945d..a84067b 100644
29876--- a/arch/x86/lib/copy_user_64.S
29877+++ b/arch/x86/lib/copy_user_64.S
29878@@ -18,31 +18,7 @@
29879 #include <asm/alternative-asm.h>
29880 #include <asm/asm.h>
29881 #include <asm/smap.h>
29882-
29883-/*
29884- * By placing feature2 after feature1 in altinstructions section, we logically
29885- * implement:
29886- * If CPU has feature2, jmp to alt2 is used
29887- * else if CPU has feature1, jmp to alt1 is used
29888- * else jmp to orig is used.
29889- */
29890- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29891-0:
29892- .byte 0xe9 /* 32bit jump */
29893- .long \orig-1f /* by default jump to orig */
29894-1:
29895- .section .altinstr_replacement,"ax"
29896-2: .byte 0xe9 /* near jump with 32bit immediate */
29897- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29898-3: .byte 0xe9 /* near jump with 32bit immediate */
29899- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29900- .previous
29901-
29902- .section .altinstructions,"a"
29903- altinstruction_entry 0b,2b,\feature1,5,5
29904- altinstruction_entry 0b,3b,\feature2,5,5
29905- .previous
29906- .endm
29907+#include <asm/pgtable.h>
29908
29909 .macro ALIGN_DESTINATION
29910 #ifdef FIX_ALIGNMENT
29911@@ -70,52 +46,6 @@
29912 #endif
29913 .endm
29914
29915-/* Standard copy_to_user with segment limit checking */
29916-ENTRY(_copy_to_user)
29917- CFI_STARTPROC
29918- GET_THREAD_INFO(%rax)
29919- movq %rdi,%rcx
29920- addq %rdx,%rcx
29921- jc bad_to_user
29922- cmpq TI_addr_limit(%rax),%rcx
29923- ja bad_to_user
29924- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29925- copy_user_generic_unrolled,copy_user_generic_string, \
29926- copy_user_enhanced_fast_string
29927- CFI_ENDPROC
29928-ENDPROC(_copy_to_user)
29929-
29930-/* Standard copy_from_user with segment limit checking */
29931-ENTRY(_copy_from_user)
29932- CFI_STARTPROC
29933- GET_THREAD_INFO(%rax)
29934- movq %rsi,%rcx
29935- addq %rdx,%rcx
29936- jc bad_from_user
29937- cmpq TI_addr_limit(%rax),%rcx
29938- ja bad_from_user
29939- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29940- copy_user_generic_unrolled,copy_user_generic_string, \
29941- copy_user_enhanced_fast_string
29942- CFI_ENDPROC
29943-ENDPROC(_copy_from_user)
29944-
29945- .section .fixup,"ax"
29946- /* must zero dest */
29947-ENTRY(bad_from_user)
29948-bad_from_user:
29949- CFI_STARTPROC
29950- movl %edx,%ecx
29951- xorl %eax,%eax
29952- rep
29953- stosb
29954-bad_to_user:
29955- movl %edx,%eax
29956- ret
29957- CFI_ENDPROC
29958-ENDPROC(bad_from_user)
29959- .previous
29960-
29961 /*
29962 * copy_user_generic_unrolled - memory copy with exception handling.
29963 * This version is for CPUs like P4 that don't have efficient micro
29964@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29965 */
29966 ENTRY(copy_user_generic_unrolled)
29967 CFI_STARTPROC
29968+ ASM_PAX_OPEN_USERLAND
29969 ASM_STAC
29970 cmpl $8,%edx
29971 jb 20f /* less then 8 bytes, go to byte copy loop */
29972@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29973 jnz 21b
29974 23: xor %eax,%eax
29975 ASM_CLAC
29976+ ASM_PAX_CLOSE_USERLAND
29977+ pax_force_retaddr
29978 ret
29979
29980 .section .fixup,"ax"
29981@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29982 */
29983 ENTRY(copy_user_generic_string)
29984 CFI_STARTPROC
29985+ ASM_PAX_OPEN_USERLAND
29986 ASM_STAC
29987 cmpl $8,%edx
29988 jb 2f /* less than 8 bytes, go to byte copy loop */
29989@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29990 movsb
29991 xorl %eax,%eax
29992 ASM_CLAC
29993+ ASM_PAX_CLOSE_USERLAND
29994+ pax_force_retaddr
29995 ret
29996
29997 .section .fixup,"ax"
29998@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29999 */
30000 ENTRY(copy_user_enhanced_fast_string)
30001 CFI_STARTPROC
30002+ ASM_PAX_OPEN_USERLAND
30003 ASM_STAC
30004 movl %edx,%ecx
30005 1: rep
30006 movsb
30007 xorl %eax,%eax
30008 ASM_CLAC
30009+ ASM_PAX_CLOSE_USERLAND
30010+ pax_force_retaddr
30011 ret
30012
30013 .section .fixup,"ax"
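
The `_copy_to_user`/`_copy_from_user` assembly entry points deleted above are not lost: elsewhere in this patch their limit checking moves into C, where a range check can run before the low-level copier is invoked. A hedged sketch of that shape only (the exact PaX body differs and adds object-size checking):

#include <linux/string.h>
#include <linux/uaccess.h>

static inline unsigned long _copy_from_user(void *to,
					    const void __user *from,
					    unsigned long n)
{
	/* refuse (and zero the destination) if the user range is bad,
	 * otherwise hand off to the unchecked low-level copier */
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;	/* bytes NOT copied, as callers expect */
}
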
30014diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30015index 6a4f43c..c70fb52 100644
30016--- a/arch/x86/lib/copy_user_nocache_64.S
30017+++ b/arch/x86/lib/copy_user_nocache_64.S
30018@@ -8,6 +8,7 @@
30019
30020 #include <linux/linkage.h>
30021 #include <asm/dwarf2.h>
30022+#include <asm/alternative-asm.h>
30023
30024 #define FIX_ALIGNMENT 1
30025
30026@@ -16,6 +17,7 @@
30027 #include <asm/thread_info.h>
30028 #include <asm/asm.h>
30029 #include <asm/smap.h>
30030+#include <asm/pgtable.h>
30031
30032 .macro ALIGN_DESTINATION
30033 #ifdef FIX_ALIGNMENT
30034@@ -49,6 +51,16 @@
30035 */
30036 ENTRY(__copy_user_nocache)
30037 CFI_STARTPROC
30038+
30039+#ifdef CONFIG_PAX_MEMORY_UDEREF
30040+ mov pax_user_shadow_base,%rcx
30041+ cmp %rcx,%rsi
30042+ jae 1f
30043+ add %rcx,%rsi
30044+1:
30045+#endif
30046+
30047+ ASM_PAX_OPEN_USERLAND
30048 ASM_STAC
30049 cmpl $8,%edx
30050 jb 20f /* less then 8 bytes, go to byte copy loop */
30051@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30052 jnz 21b
30053 23: xorl %eax,%eax
30054 ASM_CLAC
30055+ ASM_PAX_CLOSE_USERLAND
30056 sfence
30057+ pax_force_retaddr
30058 ret
30059
30060 .section .fixup,"ax"
30061diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30062index 2419d5f..fe52d0e 100644
30063--- a/arch/x86/lib/csum-copy_64.S
30064+++ b/arch/x86/lib/csum-copy_64.S
30065@@ -9,6 +9,7 @@
30066 #include <asm/dwarf2.h>
30067 #include <asm/errno.h>
30068 #include <asm/asm.h>
30069+#include <asm/alternative-asm.h>
30070
30071 /*
30072 * Checksum copy with exception handling.
30073@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30074 CFI_ADJUST_CFA_OFFSET 7*8
30075 movq %rbx, 2*8(%rsp)
30076 CFI_REL_OFFSET rbx, 2*8
30077- movq %r12, 3*8(%rsp)
30078- CFI_REL_OFFSET r12, 3*8
30079+ movq %r15, 3*8(%rsp)
30080+ CFI_REL_OFFSET r15, 3*8
30081 movq %r14, 4*8(%rsp)
30082 CFI_REL_OFFSET r14, 4*8
30083 movq %r13, 5*8(%rsp)
30084@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30085 movl %edx, %ecx
30086
30087 xorl %r9d, %r9d
30088- movq %rcx, %r12
30089+ movq %rcx, %r15
30090
30091- shrq $6, %r12
30092+ shrq $6, %r15
30093 jz .Lhandle_tail /* < 64 */
30094
30095 clc
30096
30097 /* main loop. clear in 64 byte blocks */
30098 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30099- /* r11: temp3, rdx: temp4, r12 loopcnt */
30100+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30101 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30102 .p2align 4
30103 .Lloop:
30104@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30105 adcq %r14, %rax
30106 adcq %r13, %rax
30107
30108- decl %r12d
30109+ decl %r15d
30110
30111 dest
30112 movq %rbx, (%rsi)
30113@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30114 .Lende:
30115 movq 2*8(%rsp), %rbx
30116 CFI_RESTORE rbx
30117- movq 3*8(%rsp), %r12
30118- CFI_RESTORE r12
30119+ movq 3*8(%rsp), %r15
30120+ CFI_RESTORE r15
30121 movq 4*8(%rsp), %r14
30122 CFI_RESTORE r14
30123 movq 5*8(%rsp), %r13
30124@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30125 CFI_RESTORE rbp
30126 addq $7*8, %rsp
30127 CFI_ADJUST_CFA_OFFSET -7*8
30128+ pax_force_retaddr
30129 ret
30130 CFI_RESTORE_STATE
30131
30132diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30133index 7609e0e..b449b98 100644
30134--- a/arch/x86/lib/csum-wrappers_64.c
30135+++ b/arch/x86/lib/csum-wrappers_64.c
30136@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30137 len -= 2;
30138 }
30139 }
30140+ pax_open_userland();
30141 stac();
30142- isum = csum_partial_copy_generic((__force const void *)src,
30143+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30144 dst, len, isum, errp, NULL);
30145 clac();
30146+ pax_close_userland();
30147 if (unlikely(*errp))
30148 goto out_err;
30149
30150@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30151 }
30152
30153 *errp = 0;
30154+ pax_open_userland();
30155 stac();
30156- ret = csum_partial_copy_generic(src, (void __force *)dst,
30157+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30158 len, isum, NULL, errp);
30159 clac();
30160+ pax_close_userland();
30161 return ret;
30162 }
30163 EXPORT_SYMBOL(csum_partial_copy_to_user);
30164diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30165index a451235..1daa956 100644
30166--- a/arch/x86/lib/getuser.S
30167+++ b/arch/x86/lib/getuser.S
30168@@ -33,17 +33,40 @@
30169 #include <asm/thread_info.h>
30170 #include <asm/asm.h>
30171 #include <asm/smap.h>
30172+#include <asm/segment.h>
30173+#include <asm/pgtable.h>
30174+#include <asm/alternative-asm.h>
30175+
30176+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30177+#define __copyuser_seg gs;
30178+#else
30179+#define __copyuser_seg
30180+#endif
30181
30182 .text
30183 ENTRY(__get_user_1)
30184 CFI_STARTPROC
30185+
30186+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30187 GET_THREAD_INFO(%_ASM_DX)
30188 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30189 jae bad_get_user
30190 ASM_STAC
30191-1: movzbl (%_ASM_AX),%edx
30192+
30193+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30194+ mov pax_user_shadow_base,%_ASM_DX
30195+ cmp %_ASM_DX,%_ASM_AX
30196+ jae 1234f
30197+ add %_ASM_DX,%_ASM_AX
30198+1234:
30199+#endif
30200+
30201+#endif
30202+
30203+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30204 xor %eax,%eax
30205 ASM_CLAC
30206+ pax_force_retaddr
30207 ret
30208 CFI_ENDPROC
30209 ENDPROC(__get_user_1)
30210@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30211 ENTRY(__get_user_2)
30212 CFI_STARTPROC
30213 add $1,%_ASM_AX
30214+
30215+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30216 jc bad_get_user
30217 GET_THREAD_INFO(%_ASM_DX)
30218 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30219 jae bad_get_user
30220 ASM_STAC
30221-2: movzwl -1(%_ASM_AX),%edx
30222+
30223+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30224+ mov pax_user_shadow_base,%_ASM_DX
30225+ cmp %_ASM_DX,%_ASM_AX
30226+ jae 1234f
30227+ add %_ASM_DX,%_ASM_AX
30228+1234:
30229+#endif
30230+
30231+#endif
30232+
30233+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30234 xor %eax,%eax
30235 ASM_CLAC
30236+ pax_force_retaddr
30237 ret
30238 CFI_ENDPROC
30239 ENDPROC(__get_user_2)
30240@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30241 ENTRY(__get_user_4)
30242 CFI_STARTPROC
30243 add $3,%_ASM_AX
30244+
30245+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30246 jc bad_get_user
30247 GET_THREAD_INFO(%_ASM_DX)
30248 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30249 jae bad_get_user
30250 ASM_STAC
30251-3: movl -3(%_ASM_AX),%edx
30252+
30253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30254+ mov pax_user_shadow_base,%_ASM_DX
30255+ cmp %_ASM_DX,%_ASM_AX
30256+ jae 1234f
30257+ add %_ASM_DX,%_ASM_AX
30258+1234:
30259+#endif
30260+
30261+#endif
30262+
30263+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30264 xor %eax,%eax
30265 ASM_CLAC
30266+ pax_force_retaddr
30267 ret
30268 CFI_ENDPROC
30269 ENDPROC(__get_user_4)
30270@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30271 GET_THREAD_INFO(%_ASM_DX)
30272 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30273 jae bad_get_user
30274+
30275+#ifdef CONFIG_PAX_MEMORY_UDEREF
30276+ mov pax_user_shadow_base,%_ASM_DX
30277+ cmp %_ASM_DX,%_ASM_AX
30278+ jae 1234f
30279+ add %_ASM_DX,%_ASM_AX
30280+1234:
30281+#endif
30282+
30283 ASM_STAC
30284 4: movq -7(%_ASM_AX),%rdx
30285 xor %eax,%eax
30286 ASM_CLAC
30287+ pax_force_retaddr
30288 ret
30289 #else
30290 add $7,%_ASM_AX
30291@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30292 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30293 jae bad_get_user_8
30294 ASM_STAC
30295-4: movl -7(%_ASM_AX),%edx
30296-5: movl -3(%_ASM_AX),%ecx
30297+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30298+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30299 xor %eax,%eax
30300 ASM_CLAC
30301+ pax_force_retaddr
30302 ret
30303 #endif
30304 CFI_ENDPROC
30305@@ -113,6 +175,7 @@ bad_get_user:
30306 xor %edx,%edx
30307 mov $(-EFAULT),%_ASM_AX
30308 ASM_CLAC
30309+ pax_force_retaddr
30310 ret
30311 CFI_ENDPROC
30312 END(bad_get_user)
30313@@ -124,6 +187,7 @@ bad_get_user_8:
30314 xor %ecx,%ecx
30315 mov $(-EFAULT),%_ASM_AX
30316 ASM_CLAC
30317+ pax_force_retaddr
30318 ret
30319 CFI_ENDPROC
30320 END(bad_get_user_8)
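
Each `__get_user_*` above gains the same three-instruction UDEREF fixup: compare the incoming pointer against `pax_user_shadow_base`, and if it is a raw userland address (below the base), shift it into the kernel's shadow alias of userland before dereferencing; `jae` skips the add for addresses already in the shadow region. The arithmetic as a runnable sketch, with an assumed base value:

#include <stdio.h>
#include <stdint.h>

#define USER_SHADOW_BASE 0xffff800000000000ULL	/* illustrative, not the real constant */

/* mirrors the asm: the add only happens when addr is below the base */
static uint64_t uderef_fixup(uint64_t addr)
{
	return addr >= USER_SHADOW_BASE ? addr : addr + USER_SHADOW_BASE;
}

int main(void)
{
	uint64_t user_ptr = 0x00007fffdeadbeefULL;

	printf("%#llx -> %#llx\n", (unsigned long long)user_ptr,
	       (unsigned long long)uderef_fixup(user_ptr));
	return 0;
}
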
30321diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30322index 54fcffe..7be149e 100644
30323--- a/arch/x86/lib/insn.c
30324+++ b/arch/x86/lib/insn.c
30325@@ -20,8 +20,10 @@
30326
30327 #ifdef __KERNEL__
30328 #include <linux/string.h>
30329+#include <asm/pgtable_types.h>
30330 #else
30331 #include <string.h>
30332+#define ktla_ktva(addr) addr
30333 #endif
30334 #include <asm/inat.h>
30335 #include <asm/insn.h>
30336@@ -53,8 +55,8 @@
30337 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
30338 {
30339 memset(insn, 0, sizeof(*insn));
30340- insn->kaddr = kaddr;
30341- insn->next_byte = kaddr;
30342+ insn->kaddr = ktla_ktva(kaddr);
30343+ insn->next_byte = ktla_ktva(kaddr);
30344 insn->x86_64 = x86_64 ? 1 : 0;
30345 insn->opnd_bytes = 4;
30346 if (x86_64)
30347diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30348index 05a95e7..326f2fa 100644
30349--- a/arch/x86/lib/iomap_copy_64.S
30350+++ b/arch/x86/lib/iomap_copy_64.S
30351@@ -17,6 +17,7 @@
30352
30353 #include <linux/linkage.h>
30354 #include <asm/dwarf2.h>
30355+#include <asm/alternative-asm.h>
30356
30357 /*
30358 * override generic version in lib/iomap_copy.c
30359@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30360 CFI_STARTPROC
30361 movl %edx,%ecx
30362 rep movsd
30363+ pax_force_retaddr
30364 ret
30365 CFI_ENDPROC
30366 ENDPROC(__iowrite32_copy)
30367diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30368index 56313a3..0db417e 100644
30369--- a/arch/x86/lib/memcpy_64.S
30370+++ b/arch/x86/lib/memcpy_64.S
30371@@ -24,7 +24,7 @@
30372 * This gets patched over the unrolled variant (below) via the
30373 * alternative instructions framework:
30374 */
30375- .section .altinstr_replacement, "ax", @progbits
30376+ .section .altinstr_replacement, "a", @progbits
30377 .Lmemcpy_c:
30378 movq %rdi, %rax
30379 movq %rdx, %rcx
30380@@ -33,6 +33,7 @@
30381 rep movsq
30382 movl %edx, %ecx
30383 rep movsb
30384+ pax_force_retaddr
30385 ret
30386 .Lmemcpy_e:
30387 .previous
30388@@ -44,11 +45,12 @@
30389 * This gets patched over the unrolled variant (below) via the
30390 * alternative instructions framework:
30391 */
30392- .section .altinstr_replacement, "ax", @progbits
30393+ .section .altinstr_replacement, "a", @progbits
30394 .Lmemcpy_c_e:
30395 movq %rdi, %rax
30396 movq %rdx, %rcx
30397 rep movsb
30398+ pax_force_retaddr
30399 ret
30400 .Lmemcpy_e_e:
30401 .previous
30402@@ -136,6 +138,7 @@ ENTRY(memcpy)
30403 movq %r9, 1*8(%rdi)
30404 movq %r10, -2*8(%rdi, %rdx)
30405 movq %r11, -1*8(%rdi, %rdx)
30406+ pax_force_retaddr
30407 retq
30408 .p2align 4
30409 .Lless_16bytes:
30410@@ -148,6 +151,7 @@ ENTRY(memcpy)
30411 movq -1*8(%rsi, %rdx), %r9
30412 movq %r8, 0*8(%rdi)
30413 movq %r9, -1*8(%rdi, %rdx)
30414+ pax_force_retaddr
30415 retq
30416 .p2align 4
30417 .Lless_8bytes:
30418@@ -161,6 +165,7 @@ ENTRY(memcpy)
30419 movl -4(%rsi, %rdx), %r8d
30420 movl %ecx, (%rdi)
30421 movl %r8d, -4(%rdi, %rdx)
30422+ pax_force_retaddr
30423 retq
30424 .p2align 4
30425 .Lless_3bytes:
30426@@ -179,6 +184,7 @@ ENTRY(memcpy)
30427 movb %cl, (%rdi)
30428
30429 .Lend:
30430+ pax_force_retaddr
30431 retq
30432 CFI_ENDPROC
30433 ENDPROC(memcpy)
30434diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30435index 65268a6..dd1de11 100644
30436--- a/arch/x86/lib/memmove_64.S
30437+++ b/arch/x86/lib/memmove_64.S
30438@@ -202,14 +202,16 @@ ENTRY(memmove)
30439 movb (%rsi), %r11b
30440 movb %r11b, (%rdi)
30441 13:
30442+ pax_force_retaddr
30443 retq
30444 CFI_ENDPROC
30445
30446- .section .altinstr_replacement,"ax"
30447+ .section .altinstr_replacement,"a"
30448 .Lmemmove_begin_forward_efs:
30449 /* Forward moving data. */
30450 movq %rdx, %rcx
30451 rep movsb
30452+ pax_force_retaddr
30453 retq
30454 .Lmemmove_end_forward_efs:
30455 .previous
30456diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30457index 2dcb380..2eb79fe 100644
30458--- a/arch/x86/lib/memset_64.S
30459+++ b/arch/x86/lib/memset_64.S
30460@@ -16,7 +16,7 @@
30461 *
30462 * rax original destination
30463 */
30464- .section .altinstr_replacement, "ax", @progbits
30465+ .section .altinstr_replacement, "a", @progbits
30466 .Lmemset_c:
30467 movq %rdi,%r9
30468 movq %rdx,%rcx
30469@@ -30,6 +30,7 @@
30470 movl %edx,%ecx
30471 rep stosb
30472 movq %r9,%rax
30473+ pax_force_retaddr
30474 ret
30475 .Lmemset_e:
30476 .previous
30477@@ -45,13 +46,14 @@
30478 *
30479 * rax original destination
30480 */
30481- .section .altinstr_replacement, "ax", @progbits
30482+ .section .altinstr_replacement, "a", @progbits
30483 .Lmemset_c_e:
30484 movq %rdi,%r9
30485 movb %sil,%al
30486 movq %rdx,%rcx
30487 rep stosb
30488 movq %r9,%rax
30489+ pax_force_retaddr
30490 ret
30491 .Lmemset_e_e:
30492 .previous
30493@@ -118,6 +120,7 @@ ENTRY(__memset)
30494
30495 .Lende:
30496 movq %r10,%rax
30497+ pax_force_retaddr
30498 ret
30499
30500 CFI_RESTORE_STATE
30501diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30502index c9f2d9b..e7fd2c0 100644
30503--- a/arch/x86/lib/mmx_32.c
30504+++ b/arch/x86/lib/mmx_32.c
30505@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30506 {
30507 void *p;
30508 int i;
30509+ unsigned long cr0;
30510
30511 if (unlikely(in_interrupt()))
30512 return __memcpy(to, from, len);
30513@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30514 kernel_fpu_begin();
30515
30516 __asm__ __volatile__ (
30517- "1: prefetch (%0)\n" /* This set is 28 bytes */
30518- " prefetch 64(%0)\n"
30519- " prefetch 128(%0)\n"
30520- " prefetch 192(%0)\n"
30521- " prefetch 256(%0)\n"
30522+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30523+ " prefetch 64(%1)\n"
30524+ " prefetch 128(%1)\n"
30525+ " prefetch 192(%1)\n"
30526+ " prefetch 256(%1)\n"
30527 "2: \n"
30528 ".section .fixup, \"ax\"\n"
30529- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30530+ "3: \n"
30531+
30532+#ifdef CONFIG_PAX_KERNEXEC
30533+ " movl %%cr0, %0\n"
30534+ " movl %0, %%eax\n"
30535+ " andl $0xFFFEFFFF, %%eax\n"
30536+ " movl %%eax, %%cr0\n"
30537+#endif
30538+
30539+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30540+
30541+#ifdef CONFIG_PAX_KERNEXEC
30542+ " movl %0, %%cr0\n"
30543+#endif
30544+
30545 " jmp 2b\n"
30546 ".previous\n"
30547 _ASM_EXTABLE(1b, 3b)
30548- : : "r" (from));
30549+ : "=&r" (cr0) : "r" (from) : "ax");
30550
30551 for ( ; i > 5; i--) {
30552 __asm__ __volatile__ (
30553- "1: prefetch 320(%0)\n"
30554- "2: movq (%0), %%mm0\n"
30555- " movq 8(%0), %%mm1\n"
30556- " movq 16(%0), %%mm2\n"
30557- " movq 24(%0), %%mm3\n"
30558- " movq %%mm0, (%1)\n"
30559- " movq %%mm1, 8(%1)\n"
30560- " movq %%mm2, 16(%1)\n"
30561- " movq %%mm3, 24(%1)\n"
30562- " movq 32(%0), %%mm0\n"
30563- " movq 40(%0), %%mm1\n"
30564- " movq 48(%0), %%mm2\n"
30565- " movq 56(%0), %%mm3\n"
30566- " movq %%mm0, 32(%1)\n"
30567- " movq %%mm1, 40(%1)\n"
30568- " movq %%mm2, 48(%1)\n"
30569- " movq %%mm3, 56(%1)\n"
30570+ "1: prefetch 320(%1)\n"
30571+ "2: movq (%1), %%mm0\n"
30572+ " movq 8(%1), %%mm1\n"
30573+ " movq 16(%1), %%mm2\n"
30574+ " movq 24(%1), %%mm3\n"
30575+ " movq %%mm0, (%2)\n"
30576+ " movq %%mm1, 8(%2)\n"
30577+ " movq %%mm2, 16(%2)\n"
30578+ " movq %%mm3, 24(%2)\n"
30579+ " movq 32(%1), %%mm0\n"
30580+ " movq 40(%1), %%mm1\n"
30581+ " movq 48(%1), %%mm2\n"
30582+ " movq 56(%1), %%mm3\n"
30583+ " movq %%mm0, 32(%2)\n"
30584+ " movq %%mm1, 40(%2)\n"
30585+ " movq %%mm2, 48(%2)\n"
30586+ " movq %%mm3, 56(%2)\n"
30587 ".section .fixup, \"ax\"\n"
30588- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30589+ "3:\n"
30590+
30591+#ifdef CONFIG_PAX_KERNEXEC
30592+ " movl %%cr0, %0\n"
30593+ " movl %0, %%eax\n"
30594+ " andl $0xFFFEFFFF, %%eax\n"
30595+ " movl %%eax, %%cr0\n"
30596+#endif
30597+
30598+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30599+
30600+#ifdef CONFIG_PAX_KERNEXEC
30601+ " movl %0, %%cr0\n"
30602+#endif
30603+
30604 " jmp 2b\n"
30605 ".previous\n"
30606 _ASM_EXTABLE(1b, 3b)
30607- : : "r" (from), "r" (to) : "memory");
30608+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30609
30610 from += 64;
30611 to += 64;
30612@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30613 static void fast_copy_page(void *to, void *from)
30614 {
30615 int i;
30616+ unsigned long cr0;
30617
30618 kernel_fpu_begin();
30619
30620@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30621 * but that is for later. -AV
30622 */
30623 __asm__ __volatile__(
30624- "1: prefetch (%0)\n"
30625- " prefetch 64(%0)\n"
30626- " prefetch 128(%0)\n"
30627- " prefetch 192(%0)\n"
30628- " prefetch 256(%0)\n"
30629+ "1: prefetch (%1)\n"
30630+ " prefetch 64(%1)\n"
30631+ " prefetch 128(%1)\n"
30632+ " prefetch 192(%1)\n"
30633+ " prefetch 256(%1)\n"
30634 "2: \n"
30635 ".section .fixup, \"ax\"\n"
30636- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30637+ "3: \n"
30638+
30639+#ifdef CONFIG_PAX_KERNEXEC
30640+ " movl %%cr0, %0\n"
30641+ " movl %0, %%eax\n"
30642+ " andl $0xFFFEFFFF, %%eax\n"
30643+ " movl %%eax, %%cr0\n"
30644+#endif
30645+
30646+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30647+
30648+#ifdef CONFIG_PAX_KERNEXEC
30649+ " movl %0, %%cr0\n"
30650+#endif
30651+
30652 " jmp 2b\n"
30653 ".previous\n"
30654- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30655+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30656
30657 for (i = 0; i < (4096-320)/64; i++) {
30658 __asm__ __volatile__ (
30659- "1: prefetch 320(%0)\n"
30660- "2: movq (%0), %%mm0\n"
30661- " movntq %%mm0, (%1)\n"
30662- " movq 8(%0), %%mm1\n"
30663- " movntq %%mm1, 8(%1)\n"
30664- " movq 16(%0), %%mm2\n"
30665- " movntq %%mm2, 16(%1)\n"
30666- " movq 24(%0), %%mm3\n"
30667- " movntq %%mm3, 24(%1)\n"
30668- " movq 32(%0), %%mm4\n"
30669- " movntq %%mm4, 32(%1)\n"
30670- " movq 40(%0), %%mm5\n"
30671- " movntq %%mm5, 40(%1)\n"
30672- " movq 48(%0), %%mm6\n"
30673- " movntq %%mm6, 48(%1)\n"
30674- " movq 56(%0), %%mm7\n"
30675- " movntq %%mm7, 56(%1)\n"
30676+ "1: prefetch 320(%1)\n"
30677+ "2: movq (%1), %%mm0\n"
30678+ " movntq %%mm0, (%2)\n"
30679+ " movq 8(%1), %%mm1\n"
30680+ " movntq %%mm1, 8(%2)\n"
30681+ " movq 16(%1), %%mm2\n"
30682+ " movntq %%mm2, 16(%2)\n"
30683+ " movq 24(%1), %%mm3\n"
30684+ " movntq %%mm3, 24(%2)\n"
30685+ " movq 32(%1), %%mm4\n"
30686+ " movntq %%mm4, 32(%2)\n"
30687+ " movq 40(%1), %%mm5\n"
30688+ " movntq %%mm5, 40(%2)\n"
30689+ " movq 48(%1), %%mm6\n"
30690+ " movntq %%mm6, 48(%2)\n"
30691+ " movq 56(%1), %%mm7\n"
30692+ " movntq %%mm7, 56(%2)\n"
30693 ".section .fixup, \"ax\"\n"
30694- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30695+ "3:\n"
30696+
30697+#ifdef CONFIG_PAX_KERNEXEC
30698+ " movl %%cr0, %0\n"
30699+ " movl %0, %%eax\n"
30700+ " andl $0xFFFEFFFF, %%eax\n"
30701+ " movl %%eax, %%cr0\n"
30702+#endif
30703+
30704+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30705+
30706+#ifdef CONFIG_PAX_KERNEXEC
30707+ " movl %0, %%cr0\n"
30708+#endif
30709+
30710 " jmp 2b\n"
30711 ".previous\n"
30712- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30713+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30714
30715 from += 64;
30716 to += 64;
30717@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30718 static void fast_copy_page(void *to, void *from)
30719 {
30720 int i;
30721+ unsigned long cr0;
30722
30723 kernel_fpu_begin();
30724
30725 __asm__ __volatile__ (
30726- "1: prefetch (%0)\n"
30727- " prefetch 64(%0)\n"
30728- " prefetch 128(%0)\n"
30729- " prefetch 192(%0)\n"
30730- " prefetch 256(%0)\n"
30731+ "1: prefetch (%1)\n"
30732+ " prefetch 64(%1)\n"
30733+ " prefetch 128(%1)\n"
30734+ " prefetch 192(%1)\n"
30735+ " prefetch 256(%1)\n"
30736 "2: \n"
30737 ".section .fixup, \"ax\"\n"
30738- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30739+ "3: \n"
30740+
30741+#ifdef CONFIG_PAX_KERNEXEC
30742+ " movl %%cr0, %0\n"
30743+ " movl %0, %%eax\n"
30744+ " andl $0xFFFEFFFF, %%eax\n"
30745+ " movl %%eax, %%cr0\n"
30746+#endif
30747+
30748+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30749+
30750+#ifdef CONFIG_PAX_KERNEXEC
30751+ " movl %0, %%cr0\n"
30752+#endif
30753+
30754 " jmp 2b\n"
30755 ".previous\n"
30756- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30757+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30758
30759 for (i = 0; i < 4096/64; i++) {
30760 __asm__ __volatile__ (
30761- "1: prefetch 320(%0)\n"
30762- "2: movq (%0), %%mm0\n"
30763- " movq 8(%0), %%mm1\n"
30764- " movq 16(%0), %%mm2\n"
30765- " movq 24(%0), %%mm3\n"
30766- " movq %%mm0, (%1)\n"
30767- " movq %%mm1, 8(%1)\n"
30768- " movq %%mm2, 16(%1)\n"
30769- " movq %%mm3, 24(%1)\n"
30770- " movq 32(%0), %%mm0\n"
30771- " movq 40(%0), %%mm1\n"
30772- " movq 48(%0), %%mm2\n"
30773- " movq 56(%0), %%mm3\n"
30774- " movq %%mm0, 32(%1)\n"
30775- " movq %%mm1, 40(%1)\n"
30776- " movq %%mm2, 48(%1)\n"
30777- " movq %%mm3, 56(%1)\n"
30778+ "1: prefetch 320(%1)\n"
30779+ "2: movq (%1), %%mm0\n"
30780+ " movq 8(%1), %%mm1\n"
30781+ " movq 16(%1), %%mm2\n"
30782+ " movq 24(%1), %%mm3\n"
30783+ " movq %%mm0, (%2)\n"
30784+ " movq %%mm1, 8(%2)\n"
30785+ " movq %%mm2, 16(%2)\n"
30786+ " movq %%mm3, 24(%2)\n"
30787+ " movq 32(%1), %%mm0\n"
30788+ " movq 40(%1), %%mm1\n"
30789+ " movq 48(%1), %%mm2\n"
30790+ " movq 56(%1), %%mm3\n"
30791+ " movq %%mm0, 32(%2)\n"
30792+ " movq %%mm1, 40(%2)\n"
30793+ " movq %%mm2, 48(%2)\n"
30794+ " movq %%mm3, 56(%2)\n"
30795 ".section .fixup, \"ax\"\n"
30796- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30797+ "3:\n"
30798+
30799+#ifdef CONFIG_PAX_KERNEXEC
30800+ " movl %%cr0, %0\n"
30801+ " movl %0, %%eax\n"
30802+ " andl $0xFFFEFFFF, %%eax\n"
30803+ " movl %%eax, %%cr0\n"
30804+#endif
30805+
30806+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30807+
30808+#ifdef CONFIG_PAX_KERNEXEC
30809+ " movl %0, %%cr0\n"
30810+#endif
30811+
30812 " jmp 2b\n"
30813 ".previous\n"
30814 _ASM_EXTABLE(1b, 3b)
30815- : : "r" (from), "r" (to) : "memory");
30816+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30817
30818 from += 64;
30819 to += 64;
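The cr0 choreography added to each .fixup block above exists because the fixup self-patches kernel text: "movw $0x1AEB, 1b" stores the bytes EB 1A (a short jmp over the next 26 bytes) on top of the faulting prefetch at label 1, and "movw $0x05EB, 1b" likewise stores EB 05. Under KERNEXEC the kernel's text is write-protected, so the store must be bracketed by clearing and restoring CR0.WP, which is bit 16 and explains the 0xFFFEFFFF mask. The bracket in plain C, as a sketch built on the kernel's cr0 accessors:

    #include <asm/processor-flags.h>        /* X86_CR0_WP */
    #include <asm/special_insns.h>          /* read_cr0(), write_cr0() */

    /* Sketch of the open/close pattern around a write to otherwise
     * read-only kernel text; the real PaX helpers also care about
     * preemption and barriers, which are omitted here. */
    static inline unsigned long kernexec_open_sketch(void)
    {
            unsigned long cr0 = read_cr0();

            write_cr0(cr0 & ~X86_CR0_WP);   /* same as andl $0xFFFEFFFF */
            return cr0;
    }

    static inline void kernexec_close_sketch(unsigned long cr0)
    {
            write_cr0(cr0);                 /* restore the saved WP state */
    }

The extra "=&r" (cr0) output and the "ax" clobber in the rewritten asm statements exist purely to hold the saved CR0 value and the scratch register this bracket needs, which is also why every operand number shifted up by one.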
30820diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30821index f6d13ee..d789440 100644
30822--- a/arch/x86/lib/msr-reg.S
30823+++ b/arch/x86/lib/msr-reg.S
30824@@ -3,6 +3,7 @@
30825 #include <asm/dwarf2.h>
30826 #include <asm/asm.h>
30827 #include <asm/msr.h>
30828+#include <asm/alternative-asm.h>
30829
30830 #ifdef CONFIG_X86_64
30831 /*
30832@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30833 movl %edi, 28(%r10)
30834 popq_cfi %rbp
30835 popq_cfi %rbx
30836+ pax_force_retaddr
30837 ret
30838 3:
30839 CFI_RESTORE_STATE
30840diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30841index fc6ba17..d4d989d 100644
30842--- a/arch/x86/lib/putuser.S
30843+++ b/arch/x86/lib/putuser.S
30844@@ -16,7 +16,9 @@
30845 #include <asm/errno.h>
30846 #include <asm/asm.h>
30847 #include <asm/smap.h>
30848-
30849+#include <asm/segment.h>
30850+#include <asm/pgtable.h>
30851+#include <asm/alternative-asm.h>
30852
30853 /*
30854 * __put_user_X
30855@@ -30,57 +32,125 @@
30856 * as they get called from within inline assembly.
30857 */
30858
30859-#define ENTER CFI_STARTPROC ; \
30860- GET_THREAD_INFO(%_ASM_BX)
30861-#define EXIT ASM_CLAC ; \
30862- ret ; \
30863+#define ENTER CFI_STARTPROC
30864+#define EXIT ASM_CLAC ; \
30865+ pax_force_retaddr ; \
30866+ ret ; \
30867 CFI_ENDPROC
30868
30869+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30870+#define _DEST %_ASM_CX,%_ASM_BX
30871+#else
30872+#define _DEST %_ASM_CX
30873+#endif
30874+
30875+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30876+#define __copyuser_seg gs;
30877+#else
30878+#define __copyuser_seg
30879+#endif
30880+
30881 .text
30882 ENTRY(__put_user_1)
30883 ENTER
30884+
30885+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30886+ GET_THREAD_INFO(%_ASM_BX)
30887 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30888 jae bad_put_user
30889 ASM_STAC
30890-1: movb %al,(%_ASM_CX)
30891+
30892+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30893+ mov pax_user_shadow_base,%_ASM_BX
30894+ cmp %_ASM_BX,%_ASM_CX
30895+ jb 1234f
30896+ xor %ebx,%ebx
30897+1234:
30898+#endif
30899+
30900+#endif
30901+
30902+1: __copyuser_seg movb %al,(_DEST)
30903 xor %eax,%eax
30904 EXIT
30905 ENDPROC(__put_user_1)
30906
30907 ENTRY(__put_user_2)
30908 ENTER
30909+
30910+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30911+ GET_THREAD_INFO(%_ASM_BX)
30912 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30913 sub $1,%_ASM_BX
30914 cmp %_ASM_BX,%_ASM_CX
30915 jae bad_put_user
30916 ASM_STAC
30917-2: movw %ax,(%_ASM_CX)
30918+
30919+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30920+ mov pax_user_shadow_base,%_ASM_BX
30921+ cmp %_ASM_BX,%_ASM_CX
30922+ jb 1234f
30923+ xor %ebx,%ebx
30924+1234:
30925+#endif
30926+
30927+#endif
30928+
30929+2: __copyuser_seg movw %ax,(_DEST)
30930 xor %eax,%eax
30931 EXIT
30932 ENDPROC(__put_user_2)
30933
30934 ENTRY(__put_user_4)
30935 ENTER
30936+
30937+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30938+ GET_THREAD_INFO(%_ASM_BX)
30939 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30940 sub $3,%_ASM_BX
30941 cmp %_ASM_BX,%_ASM_CX
30942 jae bad_put_user
30943 ASM_STAC
30944-3: movl %eax,(%_ASM_CX)
30945+
30946+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30947+ mov pax_user_shadow_base,%_ASM_BX
30948+ cmp %_ASM_BX,%_ASM_CX
30949+ jb 1234f
30950+ xor %ebx,%ebx
30951+1234:
30952+#endif
30953+
30954+#endif
30955+
30956+3: __copyuser_seg movl %eax,(_DEST)
30957 xor %eax,%eax
30958 EXIT
30959 ENDPROC(__put_user_4)
30960
30961 ENTRY(__put_user_8)
30962 ENTER
30963+
30964+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30965+ GET_THREAD_INFO(%_ASM_BX)
30966 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30967 sub $7,%_ASM_BX
30968 cmp %_ASM_BX,%_ASM_CX
30969 jae bad_put_user
30970 ASM_STAC
30971-4: mov %_ASM_AX,(%_ASM_CX)
30972+
30973+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30974+ mov pax_user_shadow_base,%_ASM_BX
30975+ cmp %_ASM_BX,%_ASM_CX
30976+ jb 1234f
30977+ xor %ebx,%ebx
30978+1234:
30979+#endif
30980+
30981+#endif
30982+
30983+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30984 #ifdef CONFIG_X86_32
30985-5: movl %edx,4(%_ASM_CX)
30986+5: __copyuser_seg movl %edx,4(_DEST)
30987 #endif
30988 xor %eax,%eax
30989 EXIT
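The new 1234: stanzas implement the amd64 UDEREF address translation for __put_user_*: a raw userland pointer, i.e. one below pax_user_shadow_base, must be dereferenced through the shadow mapping at ptr + pax_user_shadow_base, while a pointer already inside the shadow range gets a zero offset; the two-register _DEST addressing mode (%_ASM_CX,%_ASM_BX) then applies whichever offset %_ASM_BX ended up holding. The same decision in C, as a sketch:

    /* Sketch of the shadow redirection computed by the 1234: stanzas;
     * pax_user_shadow_base is the variable the asm references. */
    static inline unsigned long uderef_offset_sketch(unsigned long ptr)
    {
            if (ptr < pax_user_shadow_base)
                    return pax_user_shadow_base;    /* raw user pointer */
            return 0;                               /* already shadowed */
    }
    /* effective address = ptr + uderef_offset_sketch(ptr) */

On i386 the same goal is met with a segment override instead: __copyuser_seg expands to a gs prefix, so the base and limit of %gs police the access rather than a shadow mapping.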
30990diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
30991index 1cad221..de671ee 100644
30992--- a/arch/x86/lib/rwlock.S
30993+++ b/arch/x86/lib/rwlock.S
30994@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
30995 FRAME
30996 0: LOCK_PREFIX
30997 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
30998+
30999+#ifdef CONFIG_PAX_REFCOUNT
31000+ jno 1234f
31001+ LOCK_PREFIX
31002+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31003+ int $4
31004+1234:
31005+ _ASM_EXTABLE(1234b, 1234b)
31006+#endif
31007+
31008 1: rep; nop
31009 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
31010 jne 1b
31011 LOCK_PREFIX
31012 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31013+
31014+#ifdef CONFIG_PAX_REFCOUNT
31015+ jno 1234f
31016+ LOCK_PREFIX
31017+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31018+ int $4
31019+1234:
31020+ _ASM_EXTABLE(1234b, 1234b)
31021+#endif
31022+
31023 jnz 0b
31024 ENDFRAME
31025+ pax_force_retaddr
31026 ret
31027 CFI_ENDPROC
31028 END(__write_lock_failed)
31029@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
31030 FRAME
31031 0: LOCK_PREFIX
31032 READ_LOCK_SIZE(inc) (%__lock_ptr)
31033+
31034+#ifdef CONFIG_PAX_REFCOUNT
31035+ jno 1234f
31036+ LOCK_PREFIX
31037+ READ_LOCK_SIZE(dec) (%__lock_ptr)
31038+ int $4
31039+1234:
31040+ _ASM_EXTABLE(1234b, 1234b)
31041+#endif
31042+
31043 1: rep; nop
31044 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
31045 js 1b
31046 LOCK_PREFIX
31047 READ_LOCK_SIZE(dec) (%__lock_ptr)
31048+
31049+#ifdef CONFIG_PAX_REFCOUNT
31050+ jno 1234f
31051+ LOCK_PREFIX
31052+ READ_LOCK_SIZE(inc) (%__lock_ptr)
31053+ int $4
31054+1234:
31055+ _ASM_EXTABLE(1234b, 1234b)
31056+#endif
31057+
31058 js 0b
31059 ENDFRAME
31060+ pax_force_retaddr
31061 ret
31062 CFI_ENDPROC
31063 END(__read_lock_failed)
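Every PAX_REFCOUNT stanza here follows one template: perform the locked arithmetic, jno past the recovery code when the signed-overflow flag is clear, and otherwise undo the operation with the inverse locked op and raise int $4 (the x86 overflow trap) for the PaX handler, with a self-referencing extable entry so execution resumes right after the trap. The template transplanted into C inline asm, as a sketch using a plain atomic increment:

    /* Sketch of the overflow-checked pattern above, applied to an
     * atomic_t; LOCK_PREFIX and _ASM_EXTABLE are the kernel's own
     * macros from <asm/alternative.h> and <asm/asm.h>. */
    static inline void atomic_inc_checked_sketch(atomic_t *v)
    {
            asm volatile(LOCK_PREFIX "incl %0\n"
                         "jno 0f\n"
                         LOCK_PREFIX "decl %0\n"    /* roll back the overflow */
                         "int $4\n"                 /* report via the #OF trap */
                         "0:\n"
                         _ASM_EXTABLE(0b, 0b)       /* resume after the trap */
                         : "+m" (v->counter));
    }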
31064diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31065index 5dff5f0..cadebf4 100644
31066--- a/arch/x86/lib/rwsem.S
31067+++ b/arch/x86/lib/rwsem.S
31068@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31069 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31070 CFI_RESTORE __ASM_REG(dx)
31071 restore_common_regs
31072+ pax_force_retaddr
31073 ret
31074 CFI_ENDPROC
31075 ENDPROC(call_rwsem_down_read_failed)
31076@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31077 movq %rax,%rdi
31078 call rwsem_down_write_failed
31079 restore_common_regs
31080+ pax_force_retaddr
31081 ret
31082 CFI_ENDPROC
31083 ENDPROC(call_rwsem_down_write_failed)
31084@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31085 movq %rax,%rdi
31086 call rwsem_wake
31087 restore_common_regs
31088-1: ret
31089+1: pax_force_retaddr
31090+ ret
31091 CFI_ENDPROC
31092 ENDPROC(call_rwsem_wake)
31093
31094@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31095 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31096 CFI_RESTORE __ASM_REG(dx)
31097 restore_common_regs
31098+ pax_force_retaddr
31099 ret
31100 CFI_ENDPROC
31101 ENDPROC(call_rwsem_downgrade_wake)
31102diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31103index 92d9fea..b2762c8 100644
31104--- a/arch/x86/lib/thunk_64.S
31105+++ b/arch/x86/lib/thunk_64.S
31106@@ -9,6 +9,7 @@
31107 #include <asm/dwarf2.h>
31108 #include <asm/calling.h>
31109 #include <asm/asm.h>
31110+#include <asm/alternative-asm.h>
31111
31112 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31113 .macro THUNK name, func, put_ret_addr_in_rdi=0
31114@@ -16,11 +17,11 @@
31115 \name:
31116 CFI_STARTPROC
31117
31118- /* this one pushes 9 elems, the next one would be %rIP */
31119- SAVE_ARGS
31120+ /* this one pushes 15+1 elems, the next one would be %rIP */
31121+ SAVE_ARGS 8
31122
31123 .if \put_ret_addr_in_rdi
31124- movq_cfi_restore 9*8, rdi
31125+ movq_cfi_restore RIP, rdi
31126 .endif
31127
31128 call \func
31129@@ -40,9 +41,10 @@
31130
31131 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31132 CFI_STARTPROC
31133- SAVE_ARGS
31134+ SAVE_ARGS 8
31135 restore:
31136- RESTORE_ARGS
31137+ RESTORE_ARGS 1,8
31138+ pax_force_retaddr
31139 ret
31140 CFI_ENDPROC
31141 _ASM_NOKPROBE(restore)
31142diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31143index e2f5e21..4b22130 100644
31144--- a/arch/x86/lib/usercopy_32.c
31145+++ b/arch/x86/lib/usercopy_32.c
31146@@ -42,11 +42,13 @@ do { \
31147 int __d0; \
31148 might_fault(); \
31149 __asm__ __volatile__( \
31150+ __COPYUSER_SET_ES \
31151 ASM_STAC "\n" \
31152 "0: rep; stosl\n" \
31153 " movl %2,%0\n" \
31154 "1: rep; stosb\n" \
31155 "2: " ASM_CLAC "\n" \
31156+ __COPYUSER_RESTORE_ES \
31157 ".section .fixup,\"ax\"\n" \
31158 "3: lea 0(%2,%0,4),%0\n" \
31159 " jmp 2b\n" \
31160@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31161
31162 #ifdef CONFIG_X86_INTEL_USERCOPY
31163 static unsigned long
31164-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31165+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31166 {
31167 int d0, d1;
31168 __asm__ __volatile__(
31169@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31170 " .align 2,0x90\n"
31171 "3: movl 0(%4), %%eax\n"
31172 "4: movl 4(%4), %%edx\n"
31173- "5: movl %%eax, 0(%3)\n"
31174- "6: movl %%edx, 4(%3)\n"
31175+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31176+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31177 "7: movl 8(%4), %%eax\n"
31178 "8: movl 12(%4),%%edx\n"
31179- "9: movl %%eax, 8(%3)\n"
31180- "10: movl %%edx, 12(%3)\n"
31181+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31182+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31183 "11: movl 16(%4), %%eax\n"
31184 "12: movl 20(%4), %%edx\n"
31185- "13: movl %%eax, 16(%3)\n"
31186- "14: movl %%edx, 20(%3)\n"
31187+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31188+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31189 "15: movl 24(%4), %%eax\n"
31190 "16: movl 28(%4), %%edx\n"
31191- "17: movl %%eax, 24(%3)\n"
31192- "18: movl %%edx, 28(%3)\n"
31193+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31194+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31195 "19: movl 32(%4), %%eax\n"
31196 "20: movl 36(%4), %%edx\n"
31197- "21: movl %%eax, 32(%3)\n"
31198- "22: movl %%edx, 36(%3)\n"
31199+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31200+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31201 "23: movl 40(%4), %%eax\n"
31202 "24: movl 44(%4), %%edx\n"
31203- "25: movl %%eax, 40(%3)\n"
31204- "26: movl %%edx, 44(%3)\n"
31205+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31206+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31207 "27: movl 48(%4), %%eax\n"
31208 "28: movl 52(%4), %%edx\n"
31209- "29: movl %%eax, 48(%3)\n"
31210- "30: movl %%edx, 52(%3)\n"
31211+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31212+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31213 "31: movl 56(%4), %%eax\n"
31214 "32: movl 60(%4), %%edx\n"
31215- "33: movl %%eax, 56(%3)\n"
31216- "34: movl %%edx, 60(%3)\n"
31217+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31218+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31219 " addl $-64, %0\n"
31220 " addl $64, %4\n"
31221 " addl $64, %3\n"
31222@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31223 " shrl $2, %0\n"
31224 " andl $3, %%eax\n"
31225 " cld\n"
31226+ __COPYUSER_SET_ES
31227 "99: rep; movsl\n"
31228 "36: movl %%eax, %0\n"
31229 "37: rep; movsb\n"
31230 "100:\n"
31231+ __COPYUSER_RESTORE_ES
31232+ ".section .fixup,\"ax\"\n"
31233+ "101: lea 0(%%eax,%0,4),%0\n"
31234+ " jmp 100b\n"
31235+ ".previous\n"
31236+ _ASM_EXTABLE(1b,100b)
31237+ _ASM_EXTABLE(2b,100b)
31238+ _ASM_EXTABLE(3b,100b)
31239+ _ASM_EXTABLE(4b,100b)
31240+ _ASM_EXTABLE(5b,100b)
31241+ _ASM_EXTABLE(6b,100b)
31242+ _ASM_EXTABLE(7b,100b)
31243+ _ASM_EXTABLE(8b,100b)
31244+ _ASM_EXTABLE(9b,100b)
31245+ _ASM_EXTABLE(10b,100b)
31246+ _ASM_EXTABLE(11b,100b)
31247+ _ASM_EXTABLE(12b,100b)
31248+ _ASM_EXTABLE(13b,100b)
31249+ _ASM_EXTABLE(14b,100b)
31250+ _ASM_EXTABLE(15b,100b)
31251+ _ASM_EXTABLE(16b,100b)
31252+ _ASM_EXTABLE(17b,100b)
31253+ _ASM_EXTABLE(18b,100b)
31254+ _ASM_EXTABLE(19b,100b)
31255+ _ASM_EXTABLE(20b,100b)
31256+ _ASM_EXTABLE(21b,100b)
31257+ _ASM_EXTABLE(22b,100b)
31258+ _ASM_EXTABLE(23b,100b)
31259+ _ASM_EXTABLE(24b,100b)
31260+ _ASM_EXTABLE(25b,100b)
31261+ _ASM_EXTABLE(26b,100b)
31262+ _ASM_EXTABLE(27b,100b)
31263+ _ASM_EXTABLE(28b,100b)
31264+ _ASM_EXTABLE(29b,100b)
31265+ _ASM_EXTABLE(30b,100b)
31266+ _ASM_EXTABLE(31b,100b)
31267+ _ASM_EXTABLE(32b,100b)
31268+ _ASM_EXTABLE(33b,100b)
31269+ _ASM_EXTABLE(34b,100b)
31270+ _ASM_EXTABLE(35b,100b)
31271+ _ASM_EXTABLE(36b,100b)
31272+ _ASM_EXTABLE(37b,100b)
31273+ _ASM_EXTABLE(99b,101b)
31274+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31275+ : "1"(to), "2"(from), "0"(size)
31276+ : "eax", "edx", "memory");
31277+ return size;
31278+}
31279+
31280+static unsigned long
31281+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31282+{
31283+ int d0, d1;
31284+ __asm__ __volatile__(
31285+ " .align 2,0x90\n"
31286+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31287+ " cmpl $67, %0\n"
31288+ " jbe 3f\n"
31289+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31290+ " .align 2,0x90\n"
31291+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31292+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31293+ "5: movl %%eax, 0(%3)\n"
31294+ "6: movl %%edx, 4(%3)\n"
31295+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31296+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31297+ "9: movl %%eax, 8(%3)\n"
31298+ "10: movl %%edx, 12(%3)\n"
31299+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31300+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31301+ "13: movl %%eax, 16(%3)\n"
31302+ "14: movl %%edx, 20(%3)\n"
31303+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31304+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31305+ "17: movl %%eax, 24(%3)\n"
31306+ "18: movl %%edx, 28(%3)\n"
31307+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31308+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31309+ "21: movl %%eax, 32(%3)\n"
31310+ "22: movl %%edx, 36(%3)\n"
31311+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31312+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31313+ "25: movl %%eax, 40(%3)\n"
31314+ "26: movl %%edx, 44(%3)\n"
31315+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31316+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31317+ "29: movl %%eax, 48(%3)\n"
31318+ "30: movl %%edx, 52(%3)\n"
31319+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31320+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31321+ "33: movl %%eax, 56(%3)\n"
31322+ "34: movl %%edx, 60(%3)\n"
31323+ " addl $-64, %0\n"
31324+ " addl $64, %4\n"
31325+ " addl $64, %3\n"
31326+ " cmpl $63, %0\n"
31327+ " ja 1b\n"
31328+ "35: movl %0, %%eax\n"
31329+ " shrl $2, %0\n"
31330+ " andl $3, %%eax\n"
31331+ " cld\n"
31332+ "99: rep; "__copyuser_seg" movsl\n"
31333+ "36: movl %%eax, %0\n"
31334+ "37: rep; "__copyuser_seg" movsb\n"
31335+ "100:\n"
31336 ".section .fixup,\"ax\"\n"
31337 "101: lea 0(%%eax,%0,4),%0\n"
31338 " jmp 100b\n"
31339@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31340 int d0, d1;
31341 __asm__ __volatile__(
31342 " .align 2,0x90\n"
31343- "0: movl 32(%4), %%eax\n"
31344+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31345 " cmpl $67, %0\n"
31346 " jbe 2f\n"
31347- "1: movl 64(%4), %%eax\n"
31348+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31349 " .align 2,0x90\n"
31350- "2: movl 0(%4), %%eax\n"
31351- "21: movl 4(%4), %%edx\n"
31352+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31353+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31354 " movl %%eax, 0(%3)\n"
31355 " movl %%edx, 4(%3)\n"
31356- "3: movl 8(%4), %%eax\n"
31357- "31: movl 12(%4),%%edx\n"
31358+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31359+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31360 " movl %%eax, 8(%3)\n"
31361 " movl %%edx, 12(%3)\n"
31362- "4: movl 16(%4), %%eax\n"
31363- "41: movl 20(%4), %%edx\n"
31364+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31365+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31366 " movl %%eax, 16(%3)\n"
31367 " movl %%edx, 20(%3)\n"
31368- "10: movl 24(%4), %%eax\n"
31369- "51: movl 28(%4), %%edx\n"
31370+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31371+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31372 " movl %%eax, 24(%3)\n"
31373 " movl %%edx, 28(%3)\n"
31374- "11: movl 32(%4), %%eax\n"
31375- "61: movl 36(%4), %%edx\n"
31376+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31377+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31378 " movl %%eax, 32(%3)\n"
31379 " movl %%edx, 36(%3)\n"
31380- "12: movl 40(%4), %%eax\n"
31381- "71: movl 44(%4), %%edx\n"
31382+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31383+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31384 " movl %%eax, 40(%3)\n"
31385 " movl %%edx, 44(%3)\n"
31386- "13: movl 48(%4), %%eax\n"
31387- "81: movl 52(%4), %%edx\n"
31388+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31389+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31390 " movl %%eax, 48(%3)\n"
31391 " movl %%edx, 52(%3)\n"
31392- "14: movl 56(%4), %%eax\n"
31393- "91: movl 60(%4), %%edx\n"
31394+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31395+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31396 " movl %%eax, 56(%3)\n"
31397 " movl %%edx, 60(%3)\n"
31398 " addl $-64, %0\n"
31399@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31400 " shrl $2, %0\n"
31401 " andl $3, %%eax\n"
31402 " cld\n"
31403- "6: rep; movsl\n"
31404+ "6: rep; "__copyuser_seg" movsl\n"
31405 " movl %%eax,%0\n"
31406- "7: rep; movsb\n"
31407+ "7: rep; "__copyuser_seg" movsb\n"
31408 "8:\n"
31409 ".section .fixup,\"ax\"\n"
31410 "9: lea 0(%%eax,%0,4),%0\n"
31411@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31412
31413 __asm__ __volatile__(
31414 " .align 2,0x90\n"
31415- "0: movl 32(%4), %%eax\n"
31416+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31417 " cmpl $67, %0\n"
31418 " jbe 2f\n"
31419- "1: movl 64(%4), %%eax\n"
31420+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31421 " .align 2,0x90\n"
31422- "2: movl 0(%4), %%eax\n"
31423- "21: movl 4(%4), %%edx\n"
31424+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31425+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31426 " movnti %%eax, 0(%3)\n"
31427 " movnti %%edx, 4(%3)\n"
31428- "3: movl 8(%4), %%eax\n"
31429- "31: movl 12(%4),%%edx\n"
31430+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31431+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31432 " movnti %%eax, 8(%3)\n"
31433 " movnti %%edx, 12(%3)\n"
31434- "4: movl 16(%4), %%eax\n"
31435- "41: movl 20(%4), %%edx\n"
31436+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31437+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31438 " movnti %%eax, 16(%3)\n"
31439 " movnti %%edx, 20(%3)\n"
31440- "10: movl 24(%4), %%eax\n"
31441- "51: movl 28(%4), %%edx\n"
31442+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31443+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31444 " movnti %%eax, 24(%3)\n"
31445 " movnti %%edx, 28(%3)\n"
31446- "11: movl 32(%4), %%eax\n"
31447- "61: movl 36(%4), %%edx\n"
31448+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31449+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31450 " movnti %%eax, 32(%3)\n"
31451 " movnti %%edx, 36(%3)\n"
31452- "12: movl 40(%4), %%eax\n"
31453- "71: movl 44(%4), %%edx\n"
31454+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31455+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31456 " movnti %%eax, 40(%3)\n"
31457 " movnti %%edx, 44(%3)\n"
31458- "13: movl 48(%4), %%eax\n"
31459- "81: movl 52(%4), %%edx\n"
31460+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31461+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31462 " movnti %%eax, 48(%3)\n"
31463 " movnti %%edx, 52(%3)\n"
31464- "14: movl 56(%4), %%eax\n"
31465- "91: movl 60(%4), %%edx\n"
31466+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31467+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31468 " movnti %%eax, 56(%3)\n"
31469 " movnti %%edx, 60(%3)\n"
31470 " addl $-64, %0\n"
31471@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31472 " shrl $2, %0\n"
31473 " andl $3, %%eax\n"
31474 " cld\n"
31475- "6: rep; movsl\n"
31476+ "6: rep; "__copyuser_seg" movsl\n"
31477 " movl %%eax,%0\n"
31478- "7: rep; movsb\n"
31479+ "7: rep; "__copyuser_seg" movsb\n"
31480 "8:\n"
31481 ".section .fixup,\"ax\"\n"
31482 "9: lea 0(%%eax,%0,4),%0\n"
31483@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31484
31485 __asm__ __volatile__(
31486 " .align 2,0x90\n"
31487- "0: movl 32(%4), %%eax\n"
31488+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31489 " cmpl $67, %0\n"
31490 " jbe 2f\n"
31491- "1: movl 64(%4), %%eax\n"
31492+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31493 " .align 2,0x90\n"
31494- "2: movl 0(%4), %%eax\n"
31495- "21: movl 4(%4), %%edx\n"
31496+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31497+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31498 " movnti %%eax, 0(%3)\n"
31499 " movnti %%edx, 4(%3)\n"
31500- "3: movl 8(%4), %%eax\n"
31501- "31: movl 12(%4),%%edx\n"
31502+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31503+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31504 " movnti %%eax, 8(%3)\n"
31505 " movnti %%edx, 12(%3)\n"
31506- "4: movl 16(%4), %%eax\n"
31507- "41: movl 20(%4), %%edx\n"
31508+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31509+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31510 " movnti %%eax, 16(%3)\n"
31511 " movnti %%edx, 20(%3)\n"
31512- "10: movl 24(%4), %%eax\n"
31513- "51: movl 28(%4), %%edx\n"
31514+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31515+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31516 " movnti %%eax, 24(%3)\n"
31517 " movnti %%edx, 28(%3)\n"
31518- "11: movl 32(%4), %%eax\n"
31519- "61: movl 36(%4), %%edx\n"
31520+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31521+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31522 " movnti %%eax, 32(%3)\n"
31523 " movnti %%edx, 36(%3)\n"
31524- "12: movl 40(%4), %%eax\n"
31525- "71: movl 44(%4), %%edx\n"
31526+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31527+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31528 " movnti %%eax, 40(%3)\n"
31529 " movnti %%edx, 44(%3)\n"
31530- "13: movl 48(%4), %%eax\n"
31531- "81: movl 52(%4), %%edx\n"
31532+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31533+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31534 " movnti %%eax, 48(%3)\n"
31535 " movnti %%edx, 52(%3)\n"
31536- "14: movl 56(%4), %%eax\n"
31537- "91: movl 60(%4), %%edx\n"
31538+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31539+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31540 " movnti %%eax, 56(%3)\n"
31541 " movnti %%edx, 60(%3)\n"
31542 " addl $-64, %0\n"
31543@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31544 " shrl $2, %0\n"
31545 " andl $3, %%eax\n"
31546 " cld\n"
31547- "6: rep; movsl\n"
31548+ "6: rep; "__copyuser_seg" movsl\n"
31549 " movl %%eax,%0\n"
31550- "7: rep; movsb\n"
31551+ "7: rep; "__copyuser_seg" movsb\n"
31552 "8:\n"
31553 ".section .fixup,\"ax\"\n"
31554 "9: lea 0(%%eax,%0,4),%0\n"
31555@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31556 */
31557 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31558 unsigned long size);
31559-unsigned long __copy_user_intel(void __user *to, const void *from,
31560+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31561+ unsigned long size);
31562+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31563 unsigned long size);
31564 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31565 const void __user *from, unsigned long size);
31566 #endif /* CONFIG_X86_INTEL_USERCOPY */
31567
31568 /* Generic arbitrary sized copy. */
31569-#define __copy_user(to, from, size) \
31570+#define __copy_user(to, from, size, prefix, set, restore) \
31571 do { \
31572 int __d0, __d1, __d2; \
31573 __asm__ __volatile__( \
31574+ set \
31575 " cmp $7,%0\n" \
31576 " jbe 1f\n" \
31577 " movl %1,%0\n" \
31578 " negl %0\n" \
31579 " andl $7,%0\n" \
31580 " subl %0,%3\n" \
31581- "4: rep; movsb\n" \
31582+ "4: rep; "prefix"movsb\n" \
31583 " movl %3,%0\n" \
31584 " shrl $2,%0\n" \
31585 " andl $3,%3\n" \
31586 " .align 2,0x90\n" \
31587- "0: rep; movsl\n" \
31588+ "0: rep; "prefix"movsl\n" \
31589 " movl %3,%0\n" \
31590- "1: rep; movsb\n" \
31591+ "1: rep; "prefix"movsb\n" \
31592 "2:\n" \
31593+ restore \
31594 ".section .fixup,\"ax\"\n" \
31595 "5: addl %3,%0\n" \
31596 " jmp 2b\n" \
31597@@ -538,14 +650,14 @@ do { \
31598 " negl %0\n" \
31599 " andl $7,%0\n" \
31600 " subl %0,%3\n" \
31601- "4: rep; movsb\n" \
31602+ "4: rep; "__copyuser_seg"movsb\n" \
31603 " movl %3,%0\n" \
31604 " shrl $2,%0\n" \
31605 " andl $3,%3\n" \
31606 " .align 2,0x90\n" \
31607- "0: rep; movsl\n" \
31608+ "0: rep; "__copyuser_seg"movsl\n" \
31609 " movl %3,%0\n" \
31610- "1: rep; movsb\n" \
31611+ "1: rep; "__copyuser_seg"movsb\n" \
31612 "2:\n" \
31613 ".section .fixup,\"ax\"\n" \
31614 "5: addl %3,%0\n" \
31615@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31616 {
31617 stac();
31618 if (movsl_is_ok(to, from, n))
31619- __copy_user(to, from, n);
31620+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31621 else
31622- n = __copy_user_intel(to, from, n);
31623+ n = __generic_copy_to_user_intel(to, from, n);
31624 clac();
31625 return n;
31626 }
31627@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31628 {
31629 stac();
31630 if (movsl_is_ok(to, from, n))
31631- __copy_user(to, from, n);
31632+ __copy_user(to, from, n, __copyuser_seg, "", "");
31633 else
31634- n = __copy_user_intel((void __user *)to,
31635- (const void *)from, n);
31636+ n = __generic_copy_from_user_intel(to, from, n);
31637 clac();
31638 return n;
31639 }
31640@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31641 if (n > 64 && cpu_has_xmm2)
31642 n = __copy_user_intel_nocache(to, from, n);
31643 else
31644- __copy_user(to, from, n);
31645+ __copy_user(to, from, n, __copyuser_seg, "", "");
31646 #else
31647- __copy_user(to, from, n);
31648+ __copy_user(to, from, n, __copyuser_seg, "", "");
31649 #endif
31650 clac();
31651 return n;
31652 }
31653 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31654
31655-/**
31656- * copy_to_user: - Copy a block of data into user space.
31657- * @to: Destination address, in user space.
31658- * @from: Source address, in kernel space.
31659- * @n: Number of bytes to copy.
31660- *
31661- * Context: User context only. This function may sleep.
31662- *
31663- * Copy data from kernel space to user space.
31664- *
31665- * Returns number of bytes that could not be copied.
31666- * On success, this will be zero.
31667- */
31668-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31669+#ifdef CONFIG_PAX_MEMORY_UDEREF
31670+void __set_fs(mm_segment_t x)
31671 {
31672- if (access_ok(VERIFY_WRITE, to, n))
31673- n = __copy_to_user(to, from, n);
31674- return n;
31675+ switch (x.seg) {
31676+ case 0:
31677+ loadsegment(gs, 0);
31678+ break;
31679+ case TASK_SIZE_MAX:
31680+ loadsegment(gs, __USER_DS);
31681+ break;
31682+ case -1UL:
31683+ loadsegment(gs, __KERNEL_DS);
31684+ break;
31685+ default:
31686+ BUG();
31687+ }
31688 }
31689-EXPORT_SYMBOL(_copy_to_user);
31690+EXPORT_SYMBOL(__set_fs);
31691
31692-/**
31693- * copy_from_user: - Copy a block of data from user space.
31694- * @to: Destination address, in kernel space.
31695- * @from: Source address, in user space.
31696- * @n: Number of bytes to copy.
31697- *
31698- * Context: User context only. This function may sleep.
31699- *
31700- * Copy data from user space to kernel space.
31701- *
31702- * Returns number of bytes that could not be copied.
31703- * On success, this will be zero.
31704- *
31705- * If some data could not be copied, this function will pad the copied
31706- * data to the requested size using zero bytes.
31707- */
31708-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31709+void set_fs(mm_segment_t x)
31710 {
31711- if (access_ok(VERIFY_READ, from, n))
31712- n = __copy_from_user(to, from, n);
31713- else
31714- memset(to, 0, n);
31715- return n;
31716+ current_thread_info()->addr_limit = x;
31717+ __set_fs(x);
31718 }
31719-EXPORT_SYMBOL(_copy_from_user);
31720+EXPORT_SYMBOL(set_fs);
31721+#endif
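The "__copyuser_seg" splices throughout this file lean on C string-literal concatenation inside the asm templates: with i386 UDEREF the macro becomes the gs segment-override prefix, so every userland load and store is policed by the %gs descriptor that the new set_fs()/__set_fs() pair manages, and it becomes the empty string otherwise. A plausible form of the C-side definition, treated here as illustrative (the asm-side twin appears in the putuser.S hunk above):

    #if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
    #define __copyuser_seg "gs;"
    #else
    #define __copyuser_seg ""
    #endif

    /* so   "7: rep; "__copyuser_seg" movsb\n"
     * is   "7: rep; gs; movsb\n"   under UDEREF (user access via %gs)
     * and  "7: rep; movsb\n"       otherwise                          */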
31722diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31723index c905e89..01ab928 100644
31724--- a/arch/x86/lib/usercopy_64.c
31725+++ b/arch/x86/lib/usercopy_64.c
31726@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31727 might_fault();
31728 /* no memory constraint because it doesn't change any memory gcc knows
31729 about */
31730+ pax_open_userland();
31731 stac();
31732 asm volatile(
31733 " testq %[size8],%[size8]\n"
31734@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31735 _ASM_EXTABLE(0b,3b)
31736 _ASM_EXTABLE(1b,2b)
31737 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31738- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31739+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31740 [zero] "r" (0UL), [eight] "r" (8UL));
31741 clac();
31742+ pax_close_userland();
31743 return size;
31744 }
31745 EXPORT_SYMBOL(__clear_user);
31746@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31747 }
31748 EXPORT_SYMBOL(clear_user);
31749
31750-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31751+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31752 {
31753- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31754- return copy_user_generic((__force void *)to, (__force void *)from, len);
31755- }
31756- return len;
31757+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31758+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31759+ return len;
31760 }
31761 EXPORT_SYMBOL(copy_in_user);
31762
31763@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31764 * it is not necessary to optimize tail handling.
31765 */
31766 __visible unsigned long
31767-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31768+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31769 {
31770 char c;
31771 unsigned zero_len;
31772
31773+ clac();
31774+ pax_close_userland();
31775 for (; len; --len, to++) {
31776 if (__get_user_nocheck(c, from++, sizeof(char)))
31777 break;
31778@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31779 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31780 if (__put_user_nocheck(c, to++, sizeof(char)))
31781 break;
31782- clac();
31783 return len;
31784 }
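Two UDEREF hooks surface in this file: ____m(), which rebases a raw user pointer into the shadow area before the asm dereferences it, and pax_open_userland()/pax_close_userland(), which bracket the window during which userland may be touched at all. Both are defined elsewhere in the patch; as a sketch, the rebasing consistent with the shadow-base comparison visible in the putuser.S hunk earlier would be:

    /* Sketch only; the real ____m() lives elsewhere in this patch. */
    static inline unsigned long uderef_rebase_sketch(unsigned long addr)
    {
            if (addr < pax_user_shadow_base)        /* raw userland pointer */
                    addr += pax_user_shadow_base;   /* move into the shadow */
            return addr;
    }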
31785diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31786index 6a19ad9..1c48f9a 100644
31787--- a/arch/x86/mm/Makefile
31788+++ b/arch/x86/mm/Makefile
31789@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
31790 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31791
31792 obj-$(CONFIG_MEMTEST) += memtest.o
31793+
31794+quote:="
31795+obj-$(CONFIG_X86_64) += uderef_64.o
31796+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31797diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31798index 903ec1e..c4166b2 100644
31799--- a/arch/x86/mm/extable.c
31800+++ b/arch/x86/mm/extable.c
31801@@ -6,12 +6,24 @@
31802 static inline unsigned long
31803 ex_insn_addr(const struct exception_table_entry *x)
31804 {
31805- return (unsigned long)&x->insn + x->insn;
31806+ unsigned long reloc = 0;
31807+
31808+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31809+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31810+#endif
31811+
31812+ return (unsigned long)&x->insn + x->insn + reloc;
31813 }
31814 static inline unsigned long
31815 ex_fixup_addr(const struct exception_table_entry *x)
31816 {
31817- return (unsigned long)&x->fixup + x->fixup;
31818+ unsigned long reloc = 0;
31819+
31820+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31821+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31822+#endif
31823+
31824+ return (unsigned long)&x->fixup + x->fixup + reloc;
31825 }
31826
31827 int fixup_exception(struct pt_regs *regs)
31828@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31829 unsigned long new_ip;
31830
31831 #ifdef CONFIG_PNPBIOS
31832- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31833+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31834 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31835 extern u32 pnp_bios_is_utter_crap;
31836 pnp_bios_is_utter_crap = 1;
31837@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31838 i += 4;
31839 p->fixup -= i;
31840 i += 4;
31841+
31842+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31843+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31844+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31845+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31846+#endif
31847+
31848 }
31849 }
31850
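The extable changes account for the i386 KERNEXEC kernel relocation: exception-table entries are stored as 32-bit self-relative offsets, i.e. the target minus the address of the field itself, and when the kernel is loaded at ____LOAD_PHYSICAL_ADDR instead of the link-time LOAD_PHYSICAL_ADDR, decoding has to add the delta back; the sort_extable() hunk applies the matching adjustment when entries are rewritten. The decode, spelled out as a sketch:

    /* Self-relative decode as in ex_insn_addr()/ex_fixup_addr() above;
     * reloc is 0 except under i386 KERNEXEC. */
    static inline unsigned long ex_decode_sketch(const s32 *field,
                                                 unsigned long reloc)
    {
            /* the entry stores (target - &entry) */
            return (unsigned long)field + *field + reloc;
    }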
31851diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31852index 3664279..c6a7830 100644
31853--- a/arch/x86/mm/fault.c
31854+++ b/arch/x86/mm/fault.c
31855@@ -14,12 +14,19 @@
31856 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31857 #include <linux/prefetch.h> /* prefetchw */
31858 #include <linux/context_tracking.h> /* exception_enter(), ... */
31859+#include <linux/unistd.h>
31860+#include <linux/compiler.h>
31861
31862 #include <asm/traps.h> /* dotraplinkage, ... */
31863 #include <asm/pgalloc.h> /* pgd_*(), ... */
31864 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31865 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31866 #include <asm/vsyscall.h> /* emulate_vsyscall */
31867+#include <asm/tlbflush.h>
31868+
31869+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31870+#include <asm/stacktrace.h>
31871+#endif
31872
31873 #define CREATE_TRACE_POINTS
31874 #include <asm/trace/exceptions.h>
31875@@ -60,7 +67,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31876 int ret = 0;
31877
31878 /* kprobe_running() needs smp_processor_id() */
31879- if (kprobes_built_in() && !user_mode_vm(regs)) {
31880+ if (kprobes_built_in() && !user_mode(regs)) {
31881 preempt_disable();
31882 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31883 ret = 1;
31884@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31885 return !instr_lo || (instr_lo>>1) == 1;
31886 case 0x00:
31887 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31888- if (probe_kernel_address(instr, opcode))
31889+ if (user_mode(regs)) {
31890+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31891+ return 0;
31892+ } else if (probe_kernel_address(instr, opcode))
31893 return 0;
31894
31895 *prefetch = (instr_lo == 0xF) &&
31896@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31897 while (instr < max_instr) {
31898 unsigned char opcode;
31899
31900- if (probe_kernel_address(instr, opcode))
31901+ if (user_mode(regs)) {
31902+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31903+ break;
31904+ } else if (probe_kernel_address(instr, opcode))
31905 break;
31906
31907 instr++;
31908@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31909 force_sig_info(si_signo, &info, tsk);
31910 }
31911
31912+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31913+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31914+#endif
31915+
31916+#ifdef CONFIG_PAX_EMUTRAMP
31917+static int pax_handle_fetch_fault(struct pt_regs *regs);
31918+#endif
31919+
31920+#ifdef CONFIG_PAX_PAGEEXEC
31921+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31922+{
31923+ pgd_t *pgd;
31924+ pud_t *pud;
31925+ pmd_t *pmd;
31926+
31927+ pgd = pgd_offset(mm, address);
31928+ if (!pgd_present(*pgd))
31929+ return NULL;
31930+ pud = pud_offset(pgd, address);
31931+ if (!pud_present(*pud))
31932+ return NULL;
31933+ pmd = pmd_offset(pud, address);
31934+ if (!pmd_present(*pmd))
31935+ return NULL;
31936+ return pmd;
31937+}
31938+#endif
31939+
31940 DEFINE_SPINLOCK(pgd_lock);
31941 LIST_HEAD(pgd_list);
31942
31943@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
31944 for (address = VMALLOC_START & PMD_MASK;
31945 address >= TASK_SIZE && address < FIXADDR_TOP;
31946 address += PMD_SIZE) {
31947+
31948+#ifdef CONFIG_PAX_PER_CPU_PGD
31949+ unsigned long cpu;
31950+#else
31951 struct page *page;
31952+#endif
31953
31954 spin_lock(&pgd_lock);
31955+
31956+#ifdef CONFIG_PAX_PER_CPU_PGD
31957+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31958+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31959+ pmd_t *ret;
31960+
31961+ ret = vmalloc_sync_one(pgd, address);
31962+ if (!ret)
31963+ break;
31964+ pgd = get_cpu_pgd(cpu, kernel);
31965+#else
31966 list_for_each_entry(page, &pgd_list, lru) {
31967+ pgd_t *pgd;
31968 spinlock_t *pgt_lock;
31969 pmd_t *ret;
31970
31971@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
31972 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31973
31974 spin_lock(pgt_lock);
31975- ret = vmalloc_sync_one(page_address(page), address);
31976+ pgd = page_address(page);
31977+#endif
31978+
31979+ ret = vmalloc_sync_one(pgd, address);
31980+
31981+#ifndef CONFIG_PAX_PER_CPU_PGD
31982 spin_unlock(pgt_lock);
31983+#endif
31984
31985 if (!ret)
31986 break;
31987@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
31988 * an interrupt in the middle of a task switch..
31989 */
31990 pgd_paddr = read_cr3();
31991+
31992+#ifdef CONFIG_PAX_PER_CPU_PGD
31993+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31994+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31995+#endif
31996+
31997 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31998 if (!pmd_k)
31999 return -1;
32000@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
32001 * happen within a race in page table update. In the later
32002 * case just flush:
32003 */
32004- pgd = pgd_offset(current->active_mm, address);
32005+
32006 pgd_ref = pgd_offset_k(address);
32007 if (pgd_none(*pgd_ref))
32008 return -1;
32009
32010+#ifdef CONFIG_PAX_PER_CPU_PGD
32011+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32012+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32013+ if (pgd_none(*pgd)) {
32014+ set_pgd(pgd, *pgd_ref);
32015+ arch_flush_lazy_mmu_mode();
32016+ } else {
32017+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32018+ }
32019+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32020+#else
32021+ pgd = pgd_offset(current->active_mm, address);
32022+#endif
32023+
32024 if (pgd_none(*pgd)) {
32025 set_pgd(pgd, *pgd_ref);
32026 arch_flush_lazy_mmu_mode();
32027@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32028 static int is_errata100(struct pt_regs *regs, unsigned long address)
32029 {
32030 #ifdef CONFIG_X86_64
32031- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32032+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32033 return 1;
32034 #endif
32035 return 0;
32036@@ -576,7 +660,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32037 }
32038
32039 static const char nx_warning[] = KERN_CRIT
32040-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32041+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32042
32043 static void
32044 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32045@@ -585,7 +669,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32046 if (!oops_may_print())
32047 return;
32048
32049- if (error_code & PF_INSTR) {
32050+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32051 unsigned int level;
32052 pgd_t *pgd;
32053 pte_t *pte;
32054@@ -596,9 +680,21 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32055 pte = lookup_address_in_pgd(pgd, address, &level);
32056
32057 if (pte && pte_present(*pte) && !pte_exec(*pte))
32058- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32059+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32060 }
32061
32062+#ifdef CONFIG_PAX_KERNEXEC
32063+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32064+ if (current->signal->curr_ip)
32065+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32066+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32067+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32068+ else
32069+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32070+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32071+ }
32072+#endif
32073+
32074 printk(KERN_ALERT "BUG: unable to handle kernel ");
32075 if (address < PAGE_SIZE)
32076 printk(KERN_CONT "NULL pointer dereference");
32077@@ -779,6 +875,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32078 return;
32079 }
32080 #endif
32081+
32082+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32083+ if (pax_is_fetch_fault(regs, error_code, address)) {
32084+
32085+#ifdef CONFIG_PAX_EMUTRAMP
32086+ switch (pax_handle_fetch_fault(regs)) {
32087+ case 2:
32088+ return;
32089+ }
32090+#endif
32091+
32092+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32093+ do_group_exit(SIGKILL);
32094+ }
32095+#endif
32096+
32097 /* Kernel addresses are always protection faults: */
32098 if (address >= TASK_SIZE)
32099 error_code |= PF_PROT;
32100@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32101 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32102 printk(KERN_ERR
32103 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32104- tsk->comm, tsk->pid, address);
32105+ tsk->comm, task_pid_nr(tsk), address);
32106 code = BUS_MCEERR_AR;
32107 }
32108 #endif
32109@@ -918,6 +1030,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32110 return 1;
32111 }
32112
32113+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32114+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32115+{
32116+ pte_t *pte;
32117+ pmd_t *pmd;
32118+ spinlock_t *ptl;
32119+ unsigned char pte_mask;
32120+
32121+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32122+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32123+ return 0;
32124+
32125+ /* PaX: it's our fault, let's handle it if we can */
32126+
32127+ /* PaX: take a look at read faults before acquiring any locks */
32128+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32129+ /* instruction fetch attempt from a protected page in user mode */
32130+ up_read(&mm->mmap_sem);
32131+
32132+#ifdef CONFIG_PAX_EMUTRAMP
32133+ switch (pax_handle_fetch_fault(regs)) {
32134+ case 2:
32135+ return 1;
32136+ }
32137+#endif
32138+
32139+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32140+ do_group_exit(SIGKILL);
32141+ }
32142+
32143+ pmd = pax_get_pmd(mm, address);
32144+ if (unlikely(!pmd))
32145+ return 0;
32146+
32147+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32148+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32149+ pte_unmap_unlock(pte, ptl);
32150+ return 0;
32151+ }
32152+
32153+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32154+ /* write attempt to a protected page in user mode */
32155+ pte_unmap_unlock(pte, ptl);
32156+ return 0;
32157+ }
32158+
32159+#ifdef CONFIG_SMP
32160+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32161+#else
32162+ if (likely(address > get_limit(regs->cs)))
32163+#endif
32164+ {
32165+ set_pte(pte, pte_mkread(*pte));
32166+ __flush_tlb_one(address);
32167+ pte_unmap_unlock(pte, ptl);
32168+ up_read(&mm->mmap_sem);
32169+ return 1;
32170+ }
32171+
32172+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32173+
32174+ /*
32175+ * PaX: fill DTLB with user rights and retry
32176+ */
32177+ __asm__ __volatile__ (
32178+ "orb %2,(%1)\n"
32179+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32180+/*
32181+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32182+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32183+ * page fault when examined during a TLB load attempt. this is true not only
32184+ * for PTEs holding a non-present entry but also present entries that will
32185+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32186+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32187+ * for our target pages since their PTEs are simply not in the TLBs at all.
32188+
32189+ * the best thing in omitting it is that we gain around 15-20% speed in the
32190+ * fast path of the page fault handler and can get rid of tracing since we
32191+ * can no longer flush unintended entries.
32192+ */
32193+ "invlpg (%0)\n"
32194+#endif
32195+ __copyuser_seg"testb $0,(%0)\n"
32196+ "xorb %3,(%1)\n"
32197+ :
32198+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32199+ : "memory", "cc");
32200+ pte_unmap_unlock(pte, ptl);
32201+ up_read(&mm->mmap_sem);
32202+ return 1;
32203+}
32204+#endif
32205+
32206 /*
32207 * Handle a spurious fault caused by a stale TLB entry.
32208 *
32209@@ -985,6 +1190,9 @@ int show_unhandled_signals = 1;
32210 static inline int
32211 access_error(unsigned long error_code, struct vm_area_struct *vma)
32212 {
32213+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32214+ return 1;
32215+
32216 if (error_code & PF_WRITE) {
32217 /* write, present and write, not present: */
32218 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32219@@ -1019,7 +1227,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32220 if (error_code & PF_USER)
32221 return false;
32222
32223- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32224+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32225 return false;
32226
32227 return true;
32228@@ -1047,6 +1255,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32229 tsk = current;
32230 mm = tsk->mm;
32231
32232+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32233+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32234+ if (!search_exception_tables(regs->ip)) {
32235+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32236+ bad_area_nosemaphore(regs, error_code, address);
32237+ return;
32238+ }
32239+ if (address < pax_user_shadow_base) {
32240+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32241+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32242+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32243+ } else
32244+ address -= pax_user_shadow_base;
32245+ }
32246+#endif
32247+
32248 /*
32249 * Detect and handle instructions that would cause a page fault for
32250 * both a tracked kernel page and a userspace page.
32251@@ -1124,7 +1348,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32252 * User-mode registers count as a user access even for any
32253 * potential system fault or CPU buglet:
32254 */
32255- if (user_mode_vm(regs)) {
32256+ if (user_mode(regs)) {
32257 local_irq_enable();
32258 error_code |= PF_USER;
32259 flags |= FAULT_FLAG_USER;
32260@@ -1171,6 +1395,11 @@ retry:
32261 might_sleep();
32262 }
32263
32264+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32265+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32266+ return;
32267+#endif
32268+
32269 vma = find_vma(mm, address);
32270 if (unlikely(!vma)) {
32271 bad_area(regs, error_code, address);
32272@@ -1182,18 +1411,24 @@ retry:
32273 bad_area(regs, error_code, address);
32274 return;
32275 }
32276- if (error_code & PF_USER) {
32277- /*
32278- * Accessing the stack below %sp is always a bug.
32279- * The large cushion allows instructions like enter
32280- * and pusha to work. ("enter $65535, $31" pushes
32281- * 32 pointers and then decrements %sp by 65535.)
32282- */
32283- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32284- bad_area(regs, error_code, address);
32285- return;
32286- }
32287+ /*
32288+ * Accessing the stack below %sp is always a bug.
32289+ * The large cushion allows instructions like enter
32290+ * and pusha to work. ("enter $65535, $31" pushes
32291+ * 32 pointers and then decrements %sp by 65535.)
32292+ */
32293+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32294+ bad_area(regs, error_code, address);
32295+ return;
32296 }
32297+
32298+#ifdef CONFIG_PAX_SEGMEXEC
32299+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32300+ bad_area(regs, error_code, address);
32301+ return;
32302+ }
32303+#endif
32304+
32305 if (unlikely(expand_stack(vma, address))) {
32306 bad_area(regs, error_code, address);
32307 return;
32308@@ -1309,3 +1544,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32309 }
32310 NOKPROBE_SYMBOL(trace_do_page_fault);
32311 #endif /* CONFIG_TRACING */
32312+
32313+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32314+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32315+{
32316+ struct mm_struct *mm = current->mm;
32317+ unsigned long ip = regs->ip;
32318+
32319+ if (v8086_mode(regs))
32320+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32321+
32322+#ifdef CONFIG_PAX_PAGEEXEC
32323+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32324+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32325+ return true;
32326+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32327+ return true;
32328+ return false;
32329+ }
32330+#endif
32331+
32332+#ifdef CONFIG_PAX_SEGMEXEC
32333+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32334+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32335+ return true;
32336+ return false;
32337+ }
32338+#endif
32339+
32340+ return false;
32341+}
32342+#endif
32343+
32344+#ifdef CONFIG_PAX_EMUTRAMP
32345+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32346+{
32347+ int err;
32348+
32349+ do { /* PaX: libffi trampoline emulation */
32350+ unsigned char mov, jmp;
32351+ unsigned int addr1, addr2;
32352+
32353+#ifdef CONFIG_X86_64
32354+ if ((regs->ip + 9) >> 32)
32355+ break;
32356+#endif
32357+
32358+ err = get_user(mov, (unsigned char __user *)regs->ip);
32359+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32360+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32361+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32362+
32363+ if (err)
32364+ break;
32365+
32366+ if (mov == 0xB8 && jmp == 0xE9) {
32367+ regs->ax = addr1;
32368+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32369+ return 2;
32370+ }
32371+ } while (0);
32372+
32373+ do { /* PaX: gcc trampoline emulation #1 */
32374+ unsigned char mov1, mov2;
32375+ unsigned short jmp;
32376+ unsigned int addr1, addr2;
32377+
32378+#ifdef CONFIG_X86_64
32379+ if ((regs->ip + 11) >> 32)
32380+ break;
32381+#endif
32382+
32383+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32384+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32385+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32386+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32387+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32388+
32389+ if (err)
32390+ break;
32391+
32392+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32393+ regs->cx = addr1;
32394+ regs->ax = addr2;
32395+ regs->ip = addr2;
32396+ return 2;
32397+ }
32398+ } while (0);
32399+
32400+ do { /* PaX: gcc trampoline emulation #2 */
32401+ unsigned char mov, jmp;
32402+ unsigned int addr1, addr2;
32403+
32404+#ifdef CONFIG_X86_64
32405+ if ((regs->ip + 9) >> 32)
32406+ break;
32407+#endif
32408+
32409+ err = get_user(mov, (unsigned char __user *)regs->ip);
32410+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32411+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32412+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32413+
32414+ if (err)
32415+ break;
32416+
32417+ if (mov == 0xB9 && jmp == 0xE9) {
32418+ regs->cx = addr1;
32419+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32420+ return 2;
32421+ }
32422+ } while (0);
32423+
32424+ return 1; /* PaX in action */
32425+}
32426+
32427+#ifdef CONFIG_X86_64
32428+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32429+{
32430+ int err;
32431+
32432+ do { /* PaX: libffi trampoline emulation */
32433+ unsigned short mov1, mov2, jmp1;
32434+ unsigned char stcclc, jmp2;
32435+ unsigned long addr1, addr2;
32436+
32437+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32438+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32439+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32440+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32441+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32442+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32443+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32444+
32445+ if (err)
32446+ break;
32447+
32448+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32449+ regs->r11 = addr1;
32450+ regs->r10 = addr2;
32451+ if (stcclc == 0xF8)
32452+ regs->flags &= ~X86_EFLAGS_CF;
32453+ else
32454+ regs->flags |= X86_EFLAGS_CF;
32455+ regs->ip = addr1;
32456+ return 2;
32457+ }
32458+ } while (0);
32459+
32460+ do { /* PaX: gcc trampoline emulation #1 */
32461+ unsigned short mov1, mov2, jmp1;
32462+ unsigned char jmp2;
32463+ unsigned int addr1;
32464+ unsigned long addr2;
32465+
32466+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32467+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32468+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32469+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32470+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32471+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32472+
32473+ if (err)
32474+ break;
32475+
32476+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32477+ regs->r11 = addr1;
32478+ regs->r10 = addr2;
32479+ regs->ip = addr1;
32480+ return 2;
32481+ }
32482+ } while (0);
32483+
32484+ do { /* PaX: gcc trampoline emulation #2 */
32485+ unsigned short mov1, mov2, jmp1;
32486+ unsigned char jmp2;
32487+ unsigned long addr1, addr2;
32488+
32489+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32490+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32491+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32492+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32493+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32494+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32495+
32496+ if (err)
32497+ break;
32498+
32499+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32500+ regs->r11 = addr1;
32501+ regs->r10 = addr2;
32502+ regs->ip = addr1;
32503+ return 2;
32504+ }
32505+ } while (0);
32506+
32507+ return 1; /* PaX in action */
32508+}
32509+#endif
32510+
32511+/*
32512+ * PaX: decide what to do with offenders (regs->ip = fault address)
32513+ *
32514+ * returns 1 when task should be killed
32515+ * 2 when gcc trampoline was detected
32516+ */
32517+static int pax_handle_fetch_fault(struct pt_regs *regs)
32518+{
32519+ if (v8086_mode(regs))
32520+ return 1;
32521+
32522+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32523+ return 1;
32524+
32525+#ifdef CONFIG_X86_32
32526+ return pax_handle_fetch_fault_32(regs);
32527+#else
32528+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32529+ return pax_handle_fetch_fault_32(regs);
32530+ else
32531+ return pax_handle_fetch_fault_64(regs);
32532+#endif
32533+}
32534+#endif
32535+
32536+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32537+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32538+{
32539+ long i;
32540+
32541+ printk(KERN_ERR "PAX: bytes at PC: ");
32542+ for (i = 0; i < 20; i++) {
32543+ unsigned char c;
32544+ if (get_user(c, (unsigned char __force_user *)pc+i))
32545+ printk(KERN_CONT "?? ");
32546+ else
32547+ printk(KERN_CONT "%02x ", c);
32548+ }
32549+ printk("\n");
32550+
32551+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32552+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32553+ unsigned long c;
32554+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32555+#ifdef CONFIG_X86_32
32556+ printk(KERN_CONT "???????? ");
32557+#else
32558+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32559+ printk(KERN_CONT "???????? ???????? ");
32560+ else
32561+ printk(KERN_CONT "???????????????? ");
32562+#endif
32563+ } else {
32564+#ifdef CONFIG_X86_64
32565+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32566+ printk(KERN_CONT "%08x ", (unsigned int)c);
32567+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32568+ } else
32569+#endif
32570+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32571+ }
32572+ }
32573+ printk("\n");
32574+}
32575+#endif
32576+
32577+/**
32578+ * probe_kernel_write(): safely attempt to write to a location
32579+ * @dst: address to write to
32580+ * @src: pointer to the data that shall be written
32581+ * @size: size of the data chunk
32582+ *
32583+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32584+ * happens, handle that and return -EFAULT.
32585+ */
32586+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32587+{
32588+ long ret;
32589+ mm_segment_t old_fs = get_fs();
32590+
32591+ set_fs(KERNEL_DS);
32592+ pagefault_disable();
32593+ pax_open_kernel();
32594+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32595+ pax_close_kernel();
32596+ pagefault_enable();
32597+ set_fs(old_fs);
32598+
32599+ return ret ? -EFAULT : 0;
32600+}
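
The EMUTRAMP handlers above never make the faulting page executable; they recognise a few fixed byte patterns that compilers emit for stack trampolines and replay their effect on the register file. A minimal userspace model of the 32-bit "gcc trampoline #2" matcher (mov $imm32,%ecx; jmp rel32 = 0xB9 imm32 0xE9 rel32) follows; all names are illustrative, and the real handler fetches the bytes with get_user() from the fault path:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	struct fake_regs { uint32_t ip, cx; };

	/* match "mov $imm32,%ecx ; jmp rel32" at buf and replay it on regs;
	 * assumes a little-endian host, like the x86 target being modelled */
	static int emulate_gcc_tramp2(const uint8_t *buf, struct fake_regs *regs)
	{
		uint32_t addr1, addr2;

		if (buf[0] != 0xB9 || buf[5] != 0xE9)
			return 0;
		memcpy(&addr1, buf + 1, 4);	/* immediate loaded into %ecx */
		memcpy(&addr2, buf + 6, 4);	/* jmp displacement */
		regs->cx = addr1;
		regs->ip = regs->ip + addr2 + 10;	/* rel32 is relative to the next insn */
		return 1;
	}

	int main(void)
	{
		/* mov $0xdeadbeef,%ecx ; jmp .+0x10 (relative to the following insn) */
		uint8_t tramp[10] = { 0xB9, 0xef, 0xbe, 0xad, 0xde,
				      0xE9, 0x10, 0x00, 0x00, 0x00 };
		struct fake_regs regs = { .ip = 0x1000 };

		if (emulate_gcc_tramp2(tramp, &regs))
			printf("cx=%#x ip=%#x\n", regs.cx, regs.ip);	/* ip=0x101a */
		return 0;
	}

The kernel version returns 2 so the fault path knows the trampoline was emulated and the task should continue rather than be killed.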
32601diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32602index 207d9aef..69030980 100644
32603--- a/arch/x86/mm/gup.c
32604+++ b/arch/x86/mm/gup.c
32605@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32606 addr = start;
32607 len = (unsigned long) nr_pages << PAGE_SHIFT;
32608 end = start + len;
32609- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32610+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32611 (void __user *)start, len)))
32612 return 0;
32613
32614@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32615 goto slow_irqon;
32616 #endif
32617
32618+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32619+ (void __user *)start, len)))
32620+ return 0;
32621+
32622 /*
32623 * XXX: batch / limit 'nr', to avoid large irq off latency
32624 * needs some instrumenting to determine the common sizes used by
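
Both fast-path entry points in gup.c now validate the whole user range up front with access_ok_noprefault() before walking any page tables. The heart of such a check is an overflow-safe interval test; a sketch against an assumed user-space limit:

	#include <stdbool.h>
	#include <stdint.h>

	#define USER_LIMIT 0x00007ffffffff000ULL	/* illustrative x86-64 user-space top */

	/* true iff [start, start+len) stays below USER_LIMIT without wrapping */
	static bool user_range_ok(uint64_t start, uint64_t len)
	{
		return len <= USER_LIMIT && start <= USER_LIMIT - len;
	}

	int main(void)
	{
		return user_range_ok(USER_LIMIT - 4096, 4096) ? 0 : 1;
	}

Testing `start <= limit - len` instead of `start + len <= limit` avoids the wraparound that a huge page count could otherwise exploit.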
32625diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32626index 4500142..53a363c 100644
32627--- a/arch/x86/mm/highmem_32.c
32628+++ b/arch/x86/mm/highmem_32.c
32629@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32630 idx = type + KM_TYPE_NR*smp_processor_id();
32631 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32632 BUG_ON(!pte_none(*(kmap_pte-idx)));
32633+
32634+ pax_open_kernel();
32635 set_pte(kmap_pte-idx, mk_pte(page, prot));
32636+ pax_close_kernel();
32637+
32638 arch_flush_lazy_mmu_mode();
32639
32640 return (void *)vaddr;
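
The kmap_atomic_prot() hunk wraps the fixmap PTE write in pax_open_kernel()/pax_close_kernel() because KERNEXEC keeps kernel page tables read-only; inside the kernel this briefly lifts write protection (via CR0.WP). A loose userspace analogue of the open/write/close bracket, using mprotect() in place of the WP toggle (a sketch, not the kernel mechanism):

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <string.h>
	#include <unistd.h>
	#include <stdio.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		/* stand-in for a page-table page that normally stays read-only */
		char *pg = mmap(NULL, psz, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (pg == MAP_FAILED)
			return 1;
		mprotect(pg, psz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel()" */
		memcpy(pg, "pte", 4);				/* the set_pte() step */
		mprotect(pg, psz, PROT_READ);			/* "pax_close_kernel()" */
		printf("%s\n", pg);
		return 0;
	}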
32641diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32642index 8b977eb..4732c33 100644
32643--- a/arch/x86/mm/hugetlbpage.c
32644+++ b/arch/x86/mm/hugetlbpage.c
32645@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
32646 #ifdef CONFIG_HUGETLB_PAGE
32647 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32648 unsigned long addr, unsigned long len,
32649- unsigned long pgoff, unsigned long flags)
32650+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32651 {
32652 struct hstate *h = hstate_file(file);
32653 struct vm_unmapped_area_info info;
32654-
32655+
32656 info.flags = 0;
32657 info.length = len;
32658 info.low_limit = current->mm->mmap_legacy_base;
32659 info.high_limit = TASK_SIZE;
32660 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32661 info.align_offset = 0;
32662+ info.threadstack_offset = offset;
32663 return vm_unmapped_area(&info);
32664 }
32665
32666 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32667 unsigned long addr0, unsigned long len,
32668- unsigned long pgoff, unsigned long flags)
32669+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32670 {
32671 struct hstate *h = hstate_file(file);
32672 struct vm_unmapped_area_info info;
32673@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32674 info.high_limit = current->mm->mmap_base;
32675 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32676 info.align_offset = 0;
32677+ info.threadstack_offset = offset;
32678 addr = vm_unmapped_area(&info);
32679
32680 /*
32681@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32682 VM_BUG_ON(addr != -ENOMEM);
32683 info.flags = 0;
32684 info.low_limit = TASK_UNMAPPED_BASE;
32685+
32686+#ifdef CONFIG_PAX_RANDMMAP
32687+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32688+ info.low_limit += current->mm->delta_mmap;
32689+#endif
32690+
32691 info.high_limit = TASK_SIZE;
32692 addr = vm_unmapped_area(&info);
32693 }
32694@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32695 struct hstate *h = hstate_file(file);
32696 struct mm_struct *mm = current->mm;
32697 struct vm_area_struct *vma;
32698+ unsigned long pax_task_size = TASK_SIZE;
32699+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32700
32701 if (len & ~huge_page_mask(h))
32702 return -EINVAL;
32703- if (len > TASK_SIZE)
32704+
32705+#ifdef CONFIG_PAX_SEGMEXEC
32706+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32707+ pax_task_size = SEGMEXEC_TASK_SIZE;
32708+#endif
32709+
32710+ pax_task_size -= PAGE_SIZE;
32711+
32712+ if (len > pax_task_size)
32713 return -ENOMEM;
32714
32715 if (flags & MAP_FIXED) {
32716@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32717 return addr;
32718 }
32719
32720+#ifdef CONFIG_PAX_RANDMMAP
32721+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32722+#endif
32723+
32724 if (addr) {
32725 addr = ALIGN(addr, huge_page_size(h));
32726 vma = find_vma(mm, addr);
32727- if (TASK_SIZE - len >= addr &&
32728- (!vma || addr + len <= vma->vm_start))
32729+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32730 return addr;
32731 }
32732 if (mm->get_unmapped_area == arch_get_unmapped_area)
32733 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32734- pgoff, flags);
32735+ pgoff, flags, offset);
32736 else
32737 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32738- pgoff, flags);
32739+ pgoff, flags, offset);
32740 }
32741 #endif /* CONFIG_HUGETLB_PAGE */
32742
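
hugetlb_get_unmapped_area() now threads a per-mm random threadstack_offset into both vm_unmapped_area() helpers and shrinks the usable address space under SEGMEXEC before the length check. The check itself is plain interval arithmetic; a sketch with assumed constants (PaX's SEGMEXEC halves the i386 task size):

	#include <stdbool.h>

	#define MODEL_TASK_SIZE		0xC0000000UL	/* illustrative i386 3 GB split */
	#define MODEL_SEGMEXEC_SIZE	(MODEL_TASK_SIZE / 2)
	#define MODEL_PAGE_SIZE		4096UL

	/* model of the length check at the top of hugetlb_get_unmapped_area() */
	static bool hugetlb_len_ok(unsigned long len, bool segmexec)
	{
		unsigned long task_size = segmexec ? MODEL_SEGMEXEC_SIZE : MODEL_TASK_SIZE;

		task_size -= MODEL_PAGE_SIZE;	/* keep the topmost page unusable */
		return len <= task_size;
	}

	int main(void)
	{
		return hugetlb_len_ok(1UL << 30, true) ? 0 : 1;	/* 1 GB fits under ~1.5 GB */
	}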
32743diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32744index f971306..e83e0f6 100644
32745--- a/arch/x86/mm/init.c
32746+++ b/arch/x86/mm/init.c
32747@@ -4,6 +4,7 @@
32748 #include <linux/swap.h>
32749 #include <linux/memblock.h>
32750 #include <linux/bootmem.h> /* for max_low_pfn */
32751+#include <linux/tboot.h>
32752
32753 #include <asm/cacheflush.h>
32754 #include <asm/e820.h>
32755@@ -17,6 +18,8 @@
32756 #include <asm/proto.h>
32757 #include <asm/dma.h> /* for MAX_DMA_PFN */
32758 #include <asm/microcode.h>
32759+#include <asm/desc.h>
32760+#include <asm/bios_ebda.h>
32761
32762 #include "mm_internal.h"
32763
32764@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
32765 early_ioremap_page_table_range_init();
32766 #endif
32767
32768+#ifdef CONFIG_PAX_PER_CPU_PGD
32769+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32770+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32771+ KERNEL_PGD_PTRS);
32772+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32773+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32774+ KERNEL_PGD_PTRS);
32775+ load_cr3(get_cpu_pgd(0, kernel));
32776+#else
32777 load_cr3(swapper_pg_dir);
32778+#endif
32779+
32780 __flush_tlb_all();
32781
32782 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32783@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
32784 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32785 * mmio resources as well as potential bios/acpi data regions.
32786 */
32787+
32788+#ifdef CONFIG_GRKERNSEC_KMEM
32789+static unsigned int ebda_start __read_only;
32790+static unsigned int ebda_end __read_only;
32791+#endif
32792+
32793 int devmem_is_allowed(unsigned long pagenr)
32794 {
32795- if (pagenr < 256)
32796+#ifdef CONFIG_GRKERNSEC_KMEM
32797+ /* allow BDA */
32798+ if (!pagenr)
32799 return 1;
32800+ /* allow EBDA */
32801+ if (pagenr >= ebda_start && pagenr < ebda_end)
32802+ return 1;
32803+ /* if tboot is in use, allow access to its hardcoded serial log range */
32804+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32805+ return 1;
32806+#else
32807+ if (!pagenr)
32808+ return 1;
32809+#ifdef CONFIG_VM86
32810+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32811+ return 1;
32812+#endif
32813+#endif
32814+
32815+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32816+ return 1;
32817+#ifdef CONFIG_GRKERNSEC_KMEM
32818+ /* throw out everything else below 1MB */
32819+ if (pagenr <= 256)
32820+ return 0;
32821+#endif
32822 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32823 return 0;
32824 if (!page_is_ram(pagenr))
32825@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32826 #endif
32827 }
32828
32829+#ifdef CONFIG_GRKERNSEC_KMEM
32830+static inline void gr_init_ebda(void)
32831+{
32832+ unsigned int ebda_addr;
32833+ unsigned int ebda_size = 0;
32834+
32835+ ebda_addr = get_bios_ebda();
32836+ if (ebda_addr) {
32837+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32838+ ebda_size <<= 10;
32839+ }
32840+ if (ebda_addr && ebda_size) {
32841+ ebda_start = ebda_addr >> PAGE_SHIFT;
32842+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32843+ } else {
32844+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32845+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32846+ }
32847+}
32848+#else
32849+static inline void gr_init_ebda(void) { }
32850+#endif
32851+
32852 void free_initmem(void)
32853 {
32854+#ifdef CONFIG_PAX_KERNEXEC
32855+#ifdef CONFIG_X86_32
32856+ /* PaX: limit KERNEL_CS to actual size */
32857+ unsigned long addr, limit;
32858+ struct desc_struct d;
32859+ int cpu;
32860+#else
32861+ pgd_t *pgd;
32862+ pud_t *pud;
32863+ pmd_t *pmd;
32864+ unsigned long addr, end;
32865+#endif
32866+#endif
32867+
32868+ gr_init_ebda();
32869+
32870+#ifdef CONFIG_PAX_KERNEXEC
32871+#ifdef CONFIG_X86_32
32872+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32873+ limit = (limit - 1UL) >> PAGE_SHIFT;
32874+
32875+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32876+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32877+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32878+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32879+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32880+ }
32881+
32882+ /* PaX: make KERNEL_CS read-only */
32883+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32884+ if (!paravirt_enabled())
32885+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32886+/*
32887+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32888+ pgd = pgd_offset_k(addr);
32889+ pud = pud_offset(pgd, addr);
32890+ pmd = pmd_offset(pud, addr);
32891+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32892+ }
32893+*/
32894+#ifdef CONFIG_X86_PAE
32895+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32896+/*
32897+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32898+ pgd = pgd_offset_k(addr);
32899+ pud = pud_offset(pgd, addr);
32900+ pmd = pmd_offset(pud, addr);
32901+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32902+ }
32903+*/
32904+#endif
32905+
32906+#ifdef CONFIG_MODULES
32907+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32908+#endif
32909+
32910+#else
32911+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32912+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32913+ pgd = pgd_offset_k(addr);
32914+ pud = pud_offset(pgd, addr);
32915+ pmd = pmd_offset(pud, addr);
32916+ if (!pmd_present(*pmd))
32917+ continue;
32918+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32919+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32920+ else
32921+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32922+ }
32923+
32924+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32925+ end = addr + KERNEL_IMAGE_SIZE;
32926+ for (; addr < end; addr += PMD_SIZE) {
32927+ pgd = pgd_offset_k(addr);
32928+ pud = pud_offset(pgd, addr);
32929+ pmd = pmd_offset(pud, addr);
32930+ if (!pmd_present(*pmd))
32931+ continue;
32932+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32933+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32934+ }
32935+#endif
32936+
32937+ flush_tlb_all();
32938+#endif
32939+
32940 free_init_pages("unused kernel",
32941 (unsigned long)(&__init_begin),
32942 (unsigned long)(&__init_end));
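
gr_init_ebda() above derives the allowed EBDA page-frame window from BIOS data: the first byte of the EBDA holds its size in KiB, and the result is clamped to the 0xa0000 VGA hole with a conservative 0x9f000 fallback. A standalone model of that computation (macro names are local stand-ins):

	#include <stdio.h>

	#define MODEL_PAGE_SHIFT 12
	#define MODEL_PAGE_SIZE (1UL << MODEL_PAGE_SHIFT)
	#define MODEL_PAGE_ALIGN(x) (((x) + MODEL_PAGE_SIZE - 1) & ~(MODEL_PAGE_SIZE - 1))

	/* turn the EBDA base and its size byte (KiB) into an allowed
	 * [start, end) pfn range, clamped at the 0xa0000 hole, with the
	 * historical 0x9f000 fallback when the BDA gives nothing */
	static void ebda_range(unsigned long ebda_addr, unsigned size_kib,
			       unsigned long *start_pfn, unsigned long *end_pfn)
	{
		unsigned long size = (unsigned long)size_kib << 10;

		if (ebda_addr && size) {
			unsigned long end = MODEL_PAGE_ALIGN(ebda_addr + size);

			if (end > 0xa0000)
				end = 0xa0000;
			*start_pfn = ebda_addr >> MODEL_PAGE_SHIFT;
			*end_pfn = end >> MODEL_PAGE_SHIFT;
		} else {
			*start_pfn = 0x9f000 >> MODEL_PAGE_SHIFT;
			*end_pfn = 0xa0000 >> MODEL_PAGE_SHIFT;
		}
	}

	int main(void)
	{
		unsigned long s, e;

		ebda_range(0x9fc00, 1, &s, &e);		/* a typical 1 KiB EBDA */
		printf("pfns %#lx-%#lx\n", s, e);	/* 0x9f-0xa0 */
		return 0;
	}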
32943diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32944index e395048..cd38278 100644
32945--- a/arch/x86/mm/init_32.c
32946+++ b/arch/x86/mm/init_32.c
32947@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32948 bool __read_mostly __vmalloc_start_set = false;
32949
32950 /*
32951- * Creates a middle page table and puts a pointer to it in the
32952- * given global directory entry. This only returns the gd entry
32953- * in non-PAE compilation mode, since the middle layer is folded.
32954- */
32955-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32956-{
32957- pud_t *pud;
32958- pmd_t *pmd_table;
32959-
32960-#ifdef CONFIG_X86_PAE
32961- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32962- pmd_table = (pmd_t *)alloc_low_page();
32963- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32964- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32965- pud = pud_offset(pgd, 0);
32966- BUG_ON(pmd_table != pmd_offset(pud, 0));
32967-
32968- return pmd_table;
32969- }
32970-#endif
32971- pud = pud_offset(pgd, 0);
32972- pmd_table = pmd_offset(pud, 0);
32973-
32974- return pmd_table;
32975-}
32976-
32977-/*
32978 * Create a page table and place a pointer to it in a middle page
32979 * directory entry:
32980 */
32981@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32982 pte_t *page_table = (pte_t *)alloc_low_page();
32983
32984 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32985+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32986+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32987+#else
32988 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32989+#endif
32990 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32991 }
32992
32993 return pte_offset_kernel(pmd, 0);
32994 }
32995
32996+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32997+{
32998+ pud_t *pud;
32999+ pmd_t *pmd_table;
33000+
33001+ pud = pud_offset(pgd, 0);
33002+ pmd_table = pmd_offset(pud, 0);
33003+
33004+ return pmd_table;
33005+}
33006+
33007 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33008 {
33009 int pgd_idx = pgd_index(vaddr);
33010@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33011 int pgd_idx, pmd_idx;
33012 unsigned long vaddr;
33013 pgd_t *pgd;
33014+ pud_t *pud;
33015 pmd_t *pmd;
33016 pte_t *pte = NULL;
33017 unsigned long count = page_table_range_init_count(start, end);
33018@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33019 pgd = pgd_base + pgd_idx;
33020
33021 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33022- pmd = one_md_table_init(pgd);
33023- pmd = pmd + pmd_index(vaddr);
33024+ pud = pud_offset(pgd, vaddr);
33025+ pmd = pmd_offset(pud, vaddr);
33026+
33027+#ifdef CONFIG_X86_PAE
33028+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33029+#endif
33030+
33031 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33032 pmd++, pmd_idx++) {
33033 pte = page_table_kmap_check(one_page_table_init(pmd),
33034@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33035 }
33036 }
33037
33038-static inline int is_kernel_text(unsigned long addr)
33039+static inline int is_kernel_text(unsigned long start, unsigned long end)
33040 {
33041- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33042- return 1;
33043- return 0;
33044+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33045+ end <= ktla_ktva((unsigned long)_stext)) &&
33046+ (start >= ktla_ktva((unsigned long)_einittext) ||
33047+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33048+
33049+#ifdef CONFIG_ACPI_SLEEP
33050+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33051+#endif
33052+
33053+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33054+ return 0;
33055+ return 1;
33056 }
33057
33058 /*
33059@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33060 unsigned long last_map_addr = end;
33061 unsigned long start_pfn, end_pfn;
33062 pgd_t *pgd_base = swapper_pg_dir;
33063- int pgd_idx, pmd_idx, pte_ofs;
33064+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33065 unsigned long pfn;
33066 pgd_t *pgd;
33067+ pud_t *pud;
33068 pmd_t *pmd;
33069 pte_t *pte;
33070 unsigned pages_2m, pages_4k;
33071@@ -291,8 +295,13 @@ repeat:
33072 pfn = start_pfn;
33073 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33074 pgd = pgd_base + pgd_idx;
33075- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33076- pmd = one_md_table_init(pgd);
33077+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33078+ pud = pud_offset(pgd, 0);
33079+ pmd = pmd_offset(pud, 0);
33080+
33081+#ifdef CONFIG_X86_PAE
33082+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33083+#endif
33084
33085 if (pfn >= end_pfn)
33086 continue;
33087@@ -304,14 +313,13 @@ repeat:
33088 #endif
33089 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33090 pmd++, pmd_idx++) {
33091- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33092+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33093
33094 /*
33095 * Map with big pages if possible, otherwise
33096 * create normal page tables:
33097 */
33098 if (use_pse) {
33099- unsigned int addr2;
33100 pgprot_t prot = PAGE_KERNEL_LARGE;
33101 /*
33102 * first pass will use the same initial
33103@@ -322,11 +330,7 @@ repeat:
33104 _PAGE_PSE);
33105
33106 pfn &= PMD_MASK >> PAGE_SHIFT;
33107- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33108- PAGE_OFFSET + PAGE_SIZE-1;
33109-
33110- if (is_kernel_text(addr) ||
33111- is_kernel_text(addr2))
33112+ if (is_kernel_text(address, address + PMD_SIZE))
33113 prot = PAGE_KERNEL_LARGE_EXEC;
33114
33115 pages_2m++;
33116@@ -343,7 +347,7 @@ repeat:
33117 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33118 pte += pte_ofs;
33119 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33120- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33121+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33122 pgprot_t prot = PAGE_KERNEL;
33123 /*
33124 * first pass will use the same initial
33125@@ -351,7 +355,7 @@ repeat:
33126 */
33127 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33128
33129- if (is_kernel_text(addr))
33130+ if (is_kernel_text(address, address + PAGE_SIZE))
33131 prot = PAGE_KERNEL_EXEC;
33132
33133 pages_4k++;
33134@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33135
33136 pud = pud_offset(pgd, va);
33137 pmd = pmd_offset(pud, va);
33138- if (!pmd_present(*pmd))
33139+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33140 break;
33141
33142 /* should not be large page here */
33143@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33144
33145 static void __init pagetable_init(void)
33146 {
33147- pgd_t *pgd_base = swapper_pg_dir;
33148-
33149- permanent_kmaps_init(pgd_base);
33150+ permanent_kmaps_init(swapper_pg_dir);
33151 }
33152
33153-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33154+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33155 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33156
33157 /* user-defined highmem size */
33158@@ -787,10 +789,10 @@ void __init mem_init(void)
33159 ((unsigned long)&__init_end -
33160 (unsigned long)&__init_begin) >> 10,
33161
33162- (unsigned long)&_etext, (unsigned long)&_edata,
33163- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33164+ (unsigned long)&_sdata, (unsigned long)&_edata,
33165+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33166
33167- (unsigned long)&_text, (unsigned long)&_etext,
33168+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33169 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33170
33171 /*
33172@@ -883,6 +885,7 @@ void set_kernel_text_rw(void)
33173 if (!kernel_set_to_readonly)
33174 return;
33175
33176+ start = ktla_ktva(start);
33177 pr_debug("Set kernel text: %lx - %lx for read write\n",
33178 start, start+size);
33179
33180@@ -897,6 +900,7 @@ void set_kernel_text_ro(void)
33181 if (!kernel_set_to_readonly)
33182 return;
33183
33184+ start = ktla_ktva(start);
33185 pr_debug("Set kernel text: %lx - %lx for read only\n",
33186 start, start+size);
33187
33188@@ -925,6 +929,7 @@ void mark_rodata_ro(void)
33189 unsigned long start = PFN_ALIGN(_text);
33190 unsigned long size = PFN_ALIGN(_etext) - start;
33191
33192+ start = ktla_ktva(start);
33193 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33194 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33195 size >> 10);
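
The rewritten is_kernel_text() flips the logic: a candidate [start, end) mapping counts as text unless it is disjoint from every protected window (kernel text, init text, the ACPI wakeup trampoline, the low BIOS region). Each clause is the same half-open-interval disjointness test, shown here in isolation:

	#include <stdbool.h>
	#include <stdio.h>

	/* [start,end) and [lo,hi) intersect iff neither lies wholly to one side */
	static bool overlaps(unsigned long start, unsigned long end,
			     unsigned long lo, unsigned long hi)
	{
		return !(start >= hi || end <= lo);
	}

	int main(void)
	{
		/* a 2 MB PMD mapping probed against a hypothetical text window */
		printf("%d\n", overlaps(0xc0200000UL, 0xc0400000UL,
					0xc0100000UL, 0xc0300000UL));
		return 0;
	}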
33196diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33197index df1a992..94c272c 100644
33198--- a/arch/x86/mm/init_64.c
33199+++ b/arch/x86/mm/init_64.c
33200@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33201 * around without checking the pgd every time.
33202 */
33203
33204-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
33205+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
33206 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33207
33208 int force_personality32;
33209@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33210
33211 for (address = start; address <= end; address += PGDIR_SIZE) {
33212 const pgd_t *pgd_ref = pgd_offset_k(address);
33213+
33214+#ifdef CONFIG_PAX_PER_CPU_PGD
33215+ unsigned long cpu;
33216+#else
33217 struct page *page;
33218+#endif
33219
33220 if (pgd_none(*pgd_ref))
33221 continue;
33222
33223 spin_lock(&pgd_lock);
33224+
33225+#ifdef CONFIG_PAX_PER_CPU_PGD
33226+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33227+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33228+
33229+ if (pgd_none(*pgd))
33230+ set_pgd(pgd, *pgd_ref);
33231+ else
33232+ BUG_ON(pgd_page_vaddr(*pgd)
33233+ != pgd_page_vaddr(*pgd_ref));
33234+ pgd = pgd_offset_cpu(cpu, kernel, address);
33235+#else
33236 list_for_each_entry(page, &pgd_list, lru) {
33237 pgd_t *pgd;
33238 spinlock_t *pgt_lock;
33239@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33240 /* the pgt_lock only for Xen */
33241 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33242 spin_lock(pgt_lock);
33243+#endif
33244
33245 if (pgd_none(*pgd))
33246 set_pgd(pgd, *pgd_ref);
33247@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33248 BUG_ON(pgd_page_vaddr(*pgd)
33249 != pgd_page_vaddr(*pgd_ref));
33250
33251+#ifndef CONFIG_PAX_PER_CPU_PGD
33252 spin_unlock(pgt_lock);
33253+#endif
33254+
33255 }
33256 spin_unlock(&pgd_lock);
33257 }
33258@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33259 {
33260 if (pgd_none(*pgd)) {
33261 pud_t *pud = (pud_t *)spp_getpage();
33262- pgd_populate(&init_mm, pgd, pud);
33263+ pgd_populate_kernel(&init_mm, pgd, pud);
33264 if (pud != pud_offset(pgd, 0))
33265 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33266 pud, pud_offset(pgd, 0));
33267@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33268 {
33269 if (pud_none(*pud)) {
33270 pmd_t *pmd = (pmd_t *) spp_getpage();
33271- pud_populate(&init_mm, pud, pmd);
33272+ pud_populate_kernel(&init_mm, pud, pmd);
33273 if (pmd != pmd_offset(pud, 0))
33274 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33275 pmd, pmd_offset(pud, 0));
33276@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33277 pmd = fill_pmd(pud, vaddr);
33278 pte = fill_pte(pmd, vaddr);
33279
33280+ pax_open_kernel();
33281 set_pte(pte, new_pte);
33282+ pax_close_kernel();
33283
33284 /*
33285 * It's enough to flush this one mapping.
33286@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33287 pgd = pgd_offset_k((unsigned long)__va(phys));
33288 if (pgd_none(*pgd)) {
33289 pud = (pud_t *) spp_getpage();
33290- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33291- _PAGE_USER));
33292+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33293 }
33294 pud = pud_offset(pgd, (unsigned long)__va(phys));
33295 if (pud_none(*pud)) {
33296 pmd = (pmd_t *) spp_getpage();
33297- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33298- _PAGE_USER));
33299+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33300 }
33301 pmd = pmd_offset(pud, phys);
33302 BUG_ON(!pmd_none(*pmd));
33303@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33304 prot);
33305
33306 spin_lock(&init_mm.page_table_lock);
33307- pud_populate(&init_mm, pud, pmd);
33308+ pud_populate_kernel(&init_mm, pud, pmd);
33309 spin_unlock(&init_mm.page_table_lock);
33310 }
33311 __flush_tlb_all();
33312@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
33313 page_size_mask);
33314
33315 spin_lock(&init_mm.page_table_lock);
33316- pgd_populate(&init_mm, pgd, pud);
33317+ pgd_populate_kernel(&init_mm, pgd, pud);
33318 spin_unlock(&init_mm.page_table_lock);
33319 pgd_changed = true;
33320 }
33321@@ -1195,8 +1216,8 @@ static struct vm_operations_struct gate_vma_ops = {
33322 static struct vm_area_struct gate_vma = {
33323 .vm_start = VSYSCALL_ADDR,
33324 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
33325- .vm_page_prot = PAGE_READONLY_EXEC,
33326- .vm_flags = VM_READ | VM_EXEC,
33327+ .vm_page_prot = PAGE_READONLY,
33328+ .vm_flags = VM_READ,
33329 .vm_ops = &gate_vma_ops,
33330 };
33331
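
Under CONFIG_PAX_PER_CPU_PGD each CPU owns a private kernel/user pgd pair, so the sync_global_pgds() hunk above must push a newly populated kernel entry into every per-CPU pgd instead of walking pgd_list. A toy model of that propagation step over plain arrays (all names and sizes hypothetical):

	#include <stdint.h>
	#include <assert.h>

	#define MODEL_NR_CPUS 4
	#define MODEL_PTRS_PER_PGD 512

	static uint64_t ref_pgd[MODEL_PTRS_PER_PGD];			/* init_mm's pgd */
	static uint64_t cpu_pgd[MODEL_NR_CPUS][MODEL_PTRS_PER_PGD];	/* per-cpu pgds */

	/* push a newly populated kernel entry into every per-cpu pgd */
	static void sync_entry(unsigned idx)
	{
		unsigned cpu;

		for (cpu = 0; cpu < MODEL_NR_CPUS; cpu++) {
			if (cpu_pgd[cpu][idx] == 0)
				cpu_pgd[cpu][idx] = ref_pgd[idx];
			else	/* already populated: must agree with the reference */
				assert(cpu_pgd[cpu][idx] == ref_pgd[idx]);
		}
	}

	int main(void)
	{
		ref_pgd[300] = 0xabc000 | 0x63;		/* some populated kernel entry */
		sync_entry(300);
		return cpu_pgd[3][300] == ref_pgd[300] ? 0 : 1;
	}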
33332diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33333index 7b179b4..6bd17777 100644
33334--- a/arch/x86/mm/iomap_32.c
33335+++ b/arch/x86/mm/iomap_32.c
33336@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33337 type = kmap_atomic_idx_push();
33338 idx = type + KM_TYPE_NR * smp_processor_id();
33339 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33340+
33341+ pax_open_kernel();
33342 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33343+ pax_close_kernel();
33344+
33345 arch_flush_lazy_mmu_mode();
33346
33347 return (void *)vaddr;
33348diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33349index baff1da..2816ef4 100644
33350--- a/arch/x86/mm/ioremap.c
33351+++ b/arch/x86/mm/ioremap.c
33352@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33353 unsigned long i;
33354
33355 for (i = 0; i < nr_pages; ++i)
33356- if (pfn_valid(start_pfn + i) &&
33357- !PageReserved(pfn_to_page(start_pfn + i)))
33358+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33359+ !PageReserved(pfn_to_page(start_pfn + i))))
33360 return 1;
33361
33362 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33363@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
33364 *
33365 * Caller must ensure there is only one unmapping for the same pointer.
33366 */
33367-void iounmap(volatile void __iomem *addr)
33368+void iounmap(const volatile void __iomem *addr)
33369 {
33370 struct vm_struct *p, *o;
33371
33372@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33373
33374 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33375 if (page_is_ram(start >> PAGE_SHIFT))
33376+#ifdef CONFIG_HIGHMEM
33377+ if ((start >> PAGE_SHIFT) < max_low_pfn)
33378+#endif
33379 return __va(phys);
33380
33381 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33382@@ -334,13 +337,16 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33383 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
33384 {
33385 if (page_is_ram(phys >> PAGE_SHIFT))
33386+#ifdef CONFIG_HIGHMEM
33387+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33388+#endif
33389 return;
33390
33391 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33392 return;
33393 }
33394
33395-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33396+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33397
33398 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33399 {
33400@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
33401 early_ioremap_setup();
33402
33403 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33404- memset(bm_pte, 0, sizeof(bm_pte));
33405- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33406+ pmd_populate_user(&init_mm, pmd, bm_pte);
33407
33408 /*
33409 * The boot-ioremap range spans multiple pmds, for which
33410diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33411index dd89a13..d77bdcc 100644
33412--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33413+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33414@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33415 * memory (e.g. tracked pages)? For now, we need this to avoid
33416 * invoking kmemcheck for PnP BIOS calls.
33417 */
33418- if (regs->flags & X86_VM_MASK)
33419+ if (v8086_mode(regs))
33420 return false;
33421- if (regs->cs != __KERNEL_CS)
33422+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33423 return false;
33424
33425 pte = kmemcheck_pte_lookup(address);
33426diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33427index 25e7e13..1964579 100644
33428--- a/arch/x86/mm/mmap.c
33429+++ b/arch/x86/mm/mmap.c
33430@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
33431 * Leave an at least ~128 MB hole with possible stack randomization.
33432 */
33433 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33434-#define MAX_GAP (TASK_SIZE/6*5)
33435+#define MAX_GAP (pax_task_size/6*5)
33436
33437 static int mmap_is_legacy(void)
33438 {
33439@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33440 return rnd << PAGE_SHIFT;
33441 }
33442
33443-static unsigned long mmap_base(void)
33444+static unsigned long mmap_base(struct mm_struct *mm)
33445 {
33446 unsigned long gap = rlimit(RLIMIT_STACK);
33447+ unsigned long pax_task_size = TASK_SIZE;
33448+
33449+#ifdef CONFIG_PAX_SEGMEXEC
33450+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33451+ pax_task_size = SEGMEXEC_TASK_SIZE;
33452+#endif
33453
33454 if (gap < MIN_GAP)
33455 gap = MIN_GAP;
33456 else if (gap > MAX_GAP)
33457 gap = MAX_GAP;
33458
33459- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33460+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33461 }
33462
33463 /*
33464 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33465 * does, but not when emulating X86_32
33466 */
33467-static unsigned long mmap_legacy_base(void)
33468+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33469 {
33470- if (mmap_is_ia32())
33471+ if (mmap_is_ia32()) {
33472+
33473+#ifdef CONFIG_PAX_SEGMEXEC
33474+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33475+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33476+ else
33477+#endif
33478+
33479 return TASK_UNMAPPED_BASE;
33480- else
33481+ } else
33482 return TASK_UNMAPPED_BASE + mmap_rnd();
33483 }
33484
33485@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33486 */
33487 void arch_pick_mmap_layout(struct mm_struct *mm)
33488 {
33489- mm->mmap_legacy_base = mmap_legacy_base();
33490- mm->mmap_base = mmap_base();
33491+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33492+ mm->mmap_base = mmap_base(mm);
33493+
33494+#ifdef CONFIG_PAX_RANDMMAP
33495+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33496+ mm->mmap_legacy_base += mm->delta_mmap;
33497+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33498+ }
33499+#endif
33500
33501 if (mmap_is_legacy()) {
33502 mm->mmap_base = mm->mmap_legacy_base;
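
mmap_base() now takes the mm so it can substitute the SEGMEXEC task size, but the shape of the computation is unchanged: clamp the stack rlimit into [MIN_GAP, MAX_GAP] and place the base that far below the top of user space, minus the random offset. A reduced model (the real MIN_GAP also adds stack_maxrandom_size()):

	#define MODEL_PAGE_SIZE 4096UL
	#define MODEL_ALIGN_UP(x) (((x) + MODEL_PAGE_SIZE - 1) & ~(MODEL_PAGE_SIZE - 1))

	/* clamp the gap and drop mmap_base that far below the task size */
	static unsigned long mmap_base_model(unsigned long task_size,
					     unsigned long stack_rlimit,
					     unsigned long rnd)
	{
		unsigned long min_gap = 128UL * 1024 * 1024;
		unsigned long max_gap = task_size / 6 * 5;	/* MAX_GAP now scales with pax_task_size */
		unsigned long gap = stack_rlimit;

		if (gap < min_gap)
			gap = min_gap;
		else if (gap > max_gap)
			gap = max_gap;
		return MODEL_ALIGN_UP(task_size - gap - rnd);
	}

	int main(void)
	{
		/* 3 GB task size, 8 MB stack rlimit, no randomization */
		return mmap_base_model(0xC0000000UL, 8UL << 20, 0) ? 0 : 1;
	}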
33503diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33504index 0057a7a..95c7edd 100644
33505--- a/arch/x86/mm/mmio-mod.c
33506+++ b/arch/x86/mm/mmio-mod.c
33507@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33508 break;
33509 default:
33510 {
33511- unsigned char *ip = (unsigned char *)instptr;
33512+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33513 my_trace->opcode = MMIO_UNKNOWN_OP;
33514 my_trace->width = 0;
33515 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33516@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33517 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33518 void __iomem *addr)
33519 {
33520- static atomic_t next_id;
33521+ static atomic_unchecked_t next_id;
33522 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33523 /* These are page-unaligned. */
33524 struct mmiotrace_map map = {
33525@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33526 .private = trace
33527 },
33528 .phys = offset,
33529- .id = atomic_inc_return(&next_id)
33530+ .id = atomic_inc_return_unchecked(&next_id)
33531 };
33532 map.map_id = trace->id;
33533
33534@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33535 ioremap_trace_core(offset, size, addr);
33536 }
33537
33538-static void iounmap_trace_core(volatile void __iomem *addr)
33539+static void iounmap_trace_core(const volatile void __iomem *addr)
33540 {
33541 struct mmiotrace_map map = {
33542 .phys = 0,
33543@@ -328,7 +328,7 @@ not_enabled:
33544 }
33545 }
33546
33547-void mmiotrace_iounmap(volatile void __iomem *addr)
33548+void mmiotrace_iounmap(const volatile void __iomem *addr)
33549 {
33550 might_sleep();
33551 if (is_enabled()) /* recheck and proper locking in *_core() */
33552diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33553index a32b706..efb308b 100644
33554--- a/arch/x86/mm/numa.c
33555+++ b/arch/x86/mm/numa.c
33556@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
33557 return true;
33558 }
33559
33560-static int __init numa_register_memblks(struct numa_meminfo *mi)
33561+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33562 {
33563 unsigned long uninitialized_var(pfn_align);
33564 int i, nid;
33565diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33566index ae242a7..1c7998f 100644
33567--- a/arch/x86/mm/pageattr.c
33568+++ b/arch/x86/mm/pageattr.c
33569@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33570 */
33571 #ifdef CONFIG_PCI_BIOS
33572 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33573- pgprot_val(forbidden) |= _PAGE_NX;
33574+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33575 #endif
33576
33577 /*
33578@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33579 * Does not cover __inittext since that is gone later on. On
33580 * 64bit we do not enforce !NX on the low mapping
33581 */
33582- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33583- pgprot_val(forbidden) |= _PAGE_NX;
33584+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33585+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33586
33587+#ifdef CONFIG_DEBUG_RODATA
33588 /*
33589 * The .rodata section needs to be read-only. Using the pfn
33590 * catches all aliases.
33591@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33592 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33593 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33594 pgprot_val(forbidden) |= _PAGE_RW;
33595+#endif
33596
33597 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33598 /*
33599@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33600 }
33601 #endif
33602
33603+#ifdef CONFIG_PAX_KERNEXEC
33604+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33605+ pgprot_val(forbidden) |= _PAGE_RW;
33606+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33607+ }
33608+#endif
33609+
33610 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33611
33612 return prot;
33613@@ -420,23 +429,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33614 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33615 {
33616 /* change init_mm */
33617+ pax_open_kernel();
33618 set_pte_atomic(kpte, pte);
33619+
33620 #ifdef CONFIG_X86_32
33621 if (!SHARED_KERNEL_PMD) {
33622+
33623+#ifdef CONFIG_PAX_PER_CPU_PGD
33624+ unsigned long cpu;
33625+#else
33626 struct page *page;
33627+#endif
33628
33629+#ifdef CONFIG_PAX_PER_CPU_PGD
33630+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33631+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33632+#else
33633 list_for_each_entry(page, &pgd_list, lru) {
33634- pgd_t *pgd;
33635+ pgd_t *pgd = (pgd_t *)page_address(page);
33636+#endif
33637+
33638 pud_t *pud;
33639 pmd_t *pmd;
33640
33641- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33642+ pgd += pgd_index(address);
33643 pud = pud_offset(pgd, address);
33644 pmd = pmd_offset(pud, address);
33645 set_pte_atomic((pte_t *)pmd, pte);
33646 }
33647 }
33648 #endif
33649+ pax_close_kernel();
33650 }
33651
33652 static int
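
static_protections() accumulates "forbidden" permission bits per address range and strips them from the caller's request; the KERNEXEC hunk above contributes one more range (kernel text through _sdata) that forbids both writability and no-execute, keeping it read-only and executable. A reduced model (bit positions illustrative; assumes 64-bit longs):

	typedef unsigned long pteval_t;

	#define F_PAGE_RW (1UL << 1)
	#define F_PAGE_NX (1UL << 63)

	/* accumulate forbidden bits per range, then mask the request */
	static pteval_t static_protections_model(pteval_t prot, int in_text,
						 int in_rodata, pteval_t nx_supported)
	{
		pteval_t forbidden = 0;

		if (in_text)		/* text must stay executable: forbid NX */
			forbidden |= F_PAGE_NX & nx_supported;
		if (in_rodata)		/* rodata must stay read-only: forbid RW */
			forbidden |= F_PAGE_RW;
		return prot & ~forbidden;
	}

	int main(void)
	{
		pteval_t req = F_PAGE_RW | F_PAGE_NX;

		return static_protections_model(req, 1, 1, F_PAGE_NX) == 0 ? 0 : 1;
	}

Note how the code above also masks _PAGE_NX with __supported_pte_mask everywhere, so the forbidden bit is a no-op on CPUs without NX.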
33653diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33654index 6574388..87e9bef 100644
33655--- a/arch/x86/mm/pat.c
33656+++ b/arch/x86/mm/pat.c
33657@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
33658
33659 if (!entry) {
33660 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33661- current->comm, current->pid, start, end - 1);
33662+ current->comm, task_pid_nr(current), start, end - 1);
33663 return -EINVAL;
33664 }
33665
33666@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33667
33668 while (cursor < to) {
33669 if (!devmem_is_allowed(pfn)) {
33670- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33671- current->comm, from, to - 1);
33672+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33673+ current->comm, from, to - 1, cursor);
33674 return 0;
33675 }
33676 cursor += PAGE_SIZE;
33677@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
33678 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
33679 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33680 "for [mem %#010Lx-%#010Lx]\n",
33681- current->comm, current->pid,
33682+ current->comm, task_pid_nr(current),
33683 cattr_name(flags),
33684 base, (unsigned long long)(base + size-1));
33685 return -EINVAL;
33686@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33687 flags = lookup_memtype(paddr);
33688 if (want_flags != flags) {
33689 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33690- current->comm, current->pid,
33691+ current->comm, task_pid_nr(current),
33692 cattr_name(want_flags),
33693 (unsigned long long)paddr,
33694 (unsigned long long)(paddr + size - 1),
33695@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33696 free_memtype(paddr, paddr + size);
33697 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33698 " for [mem %#010Lx-%#010Lx], got %s\n",
33699- current->comm, current->pid,
33700+ current->comm, task_pid_nr(current),
33701 cattr_name(want_flags),
33702 (unsigned long long)paddr,
33703 (unsigned long long)(paddr + size - 1),
33704diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33705index 415f6c4..d319983 100644
33706--- a/arch/x86/mm/pat_rbtree.c
33707+++ b/arch/x86/mm/pat_rbtree.c
33708@@ -160,7 +160,7 @@ success:
33709
33710 failure:
33711 printk(KERN_INFO "%s:%d conflicting memory types "
33712- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33713+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33714 end, cattr_name(found_type), cattr_name(match->type));
33715 return -EBUSY;
33716 }
33717diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33718index 9f0614d..92ae64a 100644
33719--- a/arch/x86/mm/pf_in.c
33720+++ b/arch/x86/mm/pf_in.c
33721@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33722 int i;
33723 enum reason_type rv = OTHERS;
33724
33725- p = (unsigned char *)ins_addr;
33726+ p = (unsigned char *)ktla_ktva(ins_addr);
33727 p += skip_prefix(p, &prf);
33728 p += get_opcode(p, &opcode);
33729
33730@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33731 struct prefix_bits prf;
33732 int i;
33733
33734- p = (unsigned char *)ins_addr;
33735+ p = (unsigned char *)ktla_ktva(ins_addr);
33736 p += skip_prefix(p, &prf);
33737 p += get_opcode(p, &opcode);
33738
33739@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33740 struct prefix_bits prf;
33741 int i;
33742
33743- p = (unsigned char *)ins_addr;
33744+ p = (unsigned char *)ktla_ktva(ins_addr);
33745 p += skip_prefix(p, &prf);
33746 p += get_opcode(p, &opcode);
33747
33748@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33749 struct prefix_bits prf;
33750 int i;
33751
33752- p = (unsigned char *)ins_addr;
33753+ p = (unsigned char *)ktla_ktva(ins_addr);
33754 p += skip_prefix(p, &prf);
33755 p += get_opcode(p, &opcode);
33756 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33757@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33758 struct prefix_bits prf;
33759 int i;
33760
33761- p = (unsigned char *)ins_addr;
33762+ p = (unsigned char *)ktla_ktva(ins_addr);
33763 p += skip_prefix(p, &prf);
33764 p += get_opcode(p, &opcode);
33765 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
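
Every decoder entry point in pf_in.c now runs the instruction address through ktla_ktva() before dereferencing it: with KERNEXEC on i386 the kernel text is visible at two aliases a constant distance apart, and the instruction bytes must be read through the data-visible one. A sketch of such an alias-translation pair (the offset value is hypothetical):

	#include <stdint.h>

	#define MODEL_KERNEXEC_DELTA 0x10000000UL	/* hypothetical text-alias offset */

	/* kernel text linear address -> data-visible virtual address */
	static inline uintptr_t ktla_ktva_model(uintptr_t addr)
	{
		return addr + MODEL_KERNEXEC_DELTA;
	}

	/* and the inverse */
	static inline uintptr_t ktva_ktla_model(uintptr_t addr)
	{
		return addr - MODEL_KERNEXEC_DELTA;
	}

	int main(void)
	{
		uintptr_t ip = 0xc0100000UL;

		return ktva_ktla_model(ktla_ktva_model(ip)) == ip ? 0 : 1;
	}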
33766diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33767index 6fb6927..4fc13c0 100644
33768--- a/arch/x86/mm/pgtable.c
33769+++ b/arch/x86/mm/pgtable.c
33770@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33771 list_del(&page->lru);
33772 }
33773
33774-#define UNSHARED_PTRS_PER_PGD \
33775- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33776+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33777+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33778
33779+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33780+{
33781+ unsigned int count = USER_PGD_PTRS;
33782
33783+ if (!pax_user_shadow_base)
33784+ return;
33785+
33786+ while (count--)
33787+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33788+}
33789+#endif
33790+
33791+#ifdef CONFIG_PAX_PER_CPU_PGD
33792+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33793+{
33794+ unsigned int count = USER_PGD_PTRS;
33795+
33796+ while (count--) {
33797+ pgd_t pgd;
33798+
33799+#ifdef CONFIG_X86_64
33800+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33801+#else
33802+ pgd = *src++;
33803+#endif
33804+
33805+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33806+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33807+#endif
33808+
33809+ *dst++ = pgd;
33810+ }
33811+
33812+}
33813+#endif
33814+
33815+#ifdef CONFIG_X86_64
33816+#define pxd_t pud_t
33817+#define pyd_t pgd_t
33818+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33819+#define pgtable_pxd_page_ctor(page) true
33820+#define pgtable_pxd_page_dtor(page)
33821+#define pxd_free(mm, pud) pud_free((mm), (pud))
33822+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33823+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33824+#define PYD_SIZE PGDIR_SIZE
33825+#else
33826+#define pxd_t pmd_t
33827+#define pyd_t pud_t
33828+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33829+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33830+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33831+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33832+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33833+#define pyd_offset(mm, address) pud_offset((mm), (address))
33834+#define PYD_SIZE PUD_SIZE
33835+#endif
33836+
33837+#ifdef CONFIG_PAX_PER_CPU_PGD
33838+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33839+static inline void pgd_dtor(pgd_t *pgd) {}
33840+#else
33841 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33842 {
33843 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33844@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33845 pgd_list_del(pgd);
33846 spin_unlock(&pgd_lock);
33847 }
33848+#endif
33849
33850 /*
33851 * List of all pgd's needed for non-PAE so it can invalidate entries
33852@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33853 * -- nyc
33854 */
33855
33856-#ifdef CONFIG_X86_PAE
33857+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33858 /*
33859 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33860 * updating the top-level pagetable entries to guarantee the
33861@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33862 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33863 * and initialize the kernel pmds here.
33864 */
33865-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33866+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33867
33868 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33869 {
33870@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33871 */
33872 flush_tlb_mm(mm);
33873 }
33874+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33875+#define PREALLOCATED_PXDS USER_PGD_PTRS
33876 #else /* !CONFIG_X86_PAE */
33877
33878 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33879-#define PREALLOCATED_PMDS 0
33880+#define PREALLOCATED_PXDS 0
33881
33882 #endif /* CONFIG_X86_PAE */
33883
33884-static void free_pmds(pmd_t *pmds[])
33885+static void free_pxds(pxd_t *pxds[])
33886 {
33887 int i;
33888
33889- for(i = 0; i < PREALLOCATED_PMDS; i++)
33890- if (pmds[i]) {
33891- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33892- free_page((unsigned long)pmds[i]);
33893+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33894+ if (pxds[i]) {
33895+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33896+ free_page((unsigned long)pxds[i]);
33897 }
33898 }
33899
33900-static int preallocate_pmds(pmd_t *pmds[])
33901+static int preallocate_pxds(pxd_t *pxds[])
33902 {
33903 int i;
33904 bool failed = false;
33905
33906- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33907- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33908- if (!pmd)
33909+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33910+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33911+ if (!pxd)
33912 failed = true;
33913- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33914- free_page((unsigned long)pmd);
33915- pmd = NULL;
33916+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33917+ free_page((unsigned long)pxd);
33918+ pxd = NULL;
33919 failed = true;
33920 }
33921- pmds[i] = pmd;
33922+ pxds[i] = pxd;
33923 }
33924
33925 if (failed) {
33926- free_pmds(pmds);
33927+ free_pxds(pxds);
33928 return -ENOMEM;
33929 }
33930
33931@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33932 * preallocate which never got a corresponding vma will need to be
33933 * freed manually.
33934 */
33935-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33936+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33937 {
33938 int i;
33939
33940- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33941+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33942 pgd_t pgd = pgdp[i];
33943
33944 if (pgd_val(pgd) != 0) {
33945- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33946+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33947
33948- pgdp[i] = native_make_pgd(0);
33949+ set_pgd(pgdp + i, native_make_pgd(0));
33950
33951- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33952- pmd_free(mm, pmd);
33953+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33954+ pxd_free(mm, pxd);
33955 }
33956 }
33957 }
33958
33959-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33960+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33961 {
33962- pud_t *pud;
33963+ pyd_t *pyd;
33964 int i;
33965
33966- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33967+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33968 return;
33969
33970- pud = pud_offset(pgd, 0);
33971-
33972- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33973- pmd_t *pmd = pmds[i];
33974+#ifdef CONFIG_X86_64
33975+ pyd = pyd_offset(mm, 0L);
33976+#else
33977+ pyd = pyd_offset(pgd, 0L);
33978+#endif
33979
33980+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33981+ pxd_t *pxd = pxds[i];
33982 if (i >= KERNEL_PGD_BOUNDARY)
33983- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33984- sizeof(pmd_t) * PTRS_PER_PMD);
33985+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33986+ sizeof(pxd_t) * PTRS_PER_PMD);
33987
33988- pud_populate(mm, pud, pmd);
33989+ pyd_populate(mm, pyd, pxd);
33990 }
33991 }
33992
33993 pgd_t *pgd_alloc(struct mm_struct *mm)
33994 {
33995 pgd_t *pgd;
33996- pmd_t *pmds[PREALLOCATED_PMDS];
33997+ pxd_t *pxds[PREALLOCATED_PXDS];
33998
33999 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34000
34001@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34002
34003 mm->pgd = pgd;
34004
34005- if (preallocate_pmds(pmds) != 0)
34006+ if (preallocate_pxds(pxds) != 0)
34007 goto out_free_pgd;
34008
34009 if (paravirt_pgd_alloc(mm) != 0)
34010- goto out_free_pmds;
34011+ goto out_free_pxds;
34012
34013 /*
34014 * Make sure that pre-populating the pmds is atomic with
34015@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34016 spin_lock(&pgd_lock);
34017
34018 pgd_ctor(mm, pgd);
34019- pgd_prepopulate_pmd(mm, pgd, pmds);
34020+ pgd_prepopulate_pxd(mm, pgd, pxds);
34021
34022 spin_unlock(&pgd_lock);
34023
34024 return pgd;
34025
34026-out_free_pmds:
34027- free_pmds(pmds);
34028+out_free_pxds:
34029+ free_pxds(pxds);
34030 out_free_pgd:
34031 free_page((unsigned long)pgd);
34032 out:
34033@@ -313,7 +380,7 @@ out:
34034
34035 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34036 {
34037- pgd_mop_up_pmds(mm, pgd);
34038+ pgd_mop_up_pxds(mm, pgd);
34039 pgd_dtor(pgd);
34040 paravirt_pgd_free(mm, pgd);
34041 free_page((unsigned long)pgd);
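Two things happen in the pgtable.c rework above: the pxd_t/pyd_t aliases let a single preallocation/mop-up path manage PMDs under 32-bit PAE and PUDs for the USER_PGD_PTRS slots under the 64-bit per-CPU PGD, and __shadow_user_pgds()/__clone_user_pgds() maintain the UDEREF views of userland. A worked per-entry example of the two transforms (bit values illustrative only):

    /* shadow: in the pgd the kernel runs on, user pages become NX and
     * supervisor-only, so userland mappings are neither executable nor
     * reachable as user pages from kernel mode */
    pgd_t shadow = __pgd((pgd_val(src) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);

    /* clone with the default clone_pgd_mask == ~_PAGE_PRESENT: the
     * kernel's copy of a user pgd entry is not-present, so a stray
     * kernel dereference of a user pointer faults instead of working */
    pgd_t clone = __pgd((pgd_val(src) | _PAGE_USER) & clone_pgd_mask);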
34042diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34043index 4dd8cf6..f9d143e 100644
34044--- a/arch/x86/mm/pgtable_32.c
34045+++ b/arch/x86/mm/pgtable_32.c
34046@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34047 return;
34048 }
34049 pte = pte_offset_kernel(pmd, vaddr);
34050+
34051+ pax_open_kernel();
34052 if (pte_val(pteval))
34053 set_pte_at(&init_mm, vaddr, pte, pteval);
34054 else
34055 pte_clear(&init_mm, vaddr, pte);
34056+ pax_close_kernel();
34057
34058 /*
34059 * It's enough to flush this one mapping.
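The pax_open_kernel()/pax_close_kernel() bracket is needed here because KERNEXEC keeps the kernel page tables read-only; the pair briefly permits the store. A minimal sketch of the x86 mechanism, assuming the usual CR0.WP toggle (the real helpers also go through paravirt hooks):

    static inline unsigned long pax_open_kernel(void)
    {
        unsigned long cr0;

        preempt_disable();
        cr0 = read_cr0() ^ X86_CR0_WP;  /* clear WP: supervisor may write RO pages */
        BUG_ON(cr0 & X86_CR0_WP);
        write_cr0(cr0);
        return cr0 ^ X86_CR0_WP;        /* pax_close_kernel() restores this value */
    }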
34060diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34061index e666cbb..61788c45 100644
34062--- a/arch/x86/mm/physaddr.c
34063+++ b/arch/x86/mm/physaddr.c
34064@@ -10,7 +10,7 @@
34065 #ifdef CONFIG_X86_64
34066
34067 #ifdef CONFIG_DEBUG_VIRTUAL
34068-unsigned long __phys_addr(unsigned long x)
34069+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34070 {
34071 unsigned long y = x - __START_KERNEL_map;
34072
34073@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34074 #else
34075
34076 #ifdef CONFIG_DEBUG_VIRTUAL
34077-unsigned long __phys_addr(unsigned long x)
34078+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34079 {
34080 unsigned long phys_addr = x - PAGE_OFFSET;
34081 /* VMALLOC_* aren't constants */
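__intentional_overflow(-1) is an annotation for the PaX size_overflow gcc plugin: the subtractions in __phys_addr() deliberately rely on unsigned wraparound, and the marker exempts them from the plugin's overflow instrumentation. A sketch of how such a marker is commonly defined, with a no-op fallback when the plugin is absent (assumed shape, not part of this hunk):

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif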
34082diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34083index 90555bf..f5f1828 100644
34084--- a/arch/x86/mm/setup_nx.c
34085+++ b/arch/x86/mm/setup_nx.c
34086@@ -5,8 +5,10 @@
34087 #include <asm/pgtable.h>
34088 #include <asm/proto.h>
34089
34090+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34091 static int disable_nx;
34092
34093+#ifndef CONFIG_PAX_PAGEEXEC
34094 /*
34095 * noexec = on|off
34096 *
34097@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34098 return 0;
34099 }
34100 early_param("noexec", noexec_setup);
34101+#endif
34102+
34103+#endif
34104
34105 void x86_configure_nx(void)
34106 {
34107+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34108 if (cpu_has_nx && !disable_nx)
34109 __supported_pte_mask |= _PAGE_NX;
34110 else
34111+#endif
34112 __supported_pte_mask &= ~_PAGE_NX;
34113 }
34114
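With CONFIG_PAX_PAGEEXEC the noexec= handler is compiled out, so NX support can no longer be disabled from the command line, and on builds without X86_64 or X86_PAE (whose page tables have no NX bit) the mask is unconditionally cleared. Under PAGEEXEC the function therefore reduces to this sketch:

    void x86_configure_nx(void)
    {
        if (cpu_has_nx)                         /* disable_nx can never be set */
            __supported_pte_mask |= _PAGE_NX;
        else
            __supported_pte_mask &= ~_PAGE_NX;
    }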
34115diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34116index dd8dda1..9e9b0f6 100644
34117--- a/arch/x86/mm/tlb.c
34118+++ b/arch/x86/mm/tlb.c
34119@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34120 BUG();
34121 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34122 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34123+
34124+#ifndef CONFIG_PAX_PER_CPU_PGD
34125 load_cr3(swapper_pg_dir);
34126+#endif
34127+
34128 }
34129 }
34130 EXPORT_SYMBOL_GPL(leave_mm);
34131diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34132new file mode 100644
34133index 0000000..dace51c
34134--- /dev/null
34135+++ b/arch/x86/mm/uderef_64.c
34136@@ -0,0 +1,37 @@
34137+#include <linux/mm.h>
34138+#include <asm/pgtable.h>
34139+#include <asm/uaccess.h>
34140+
34141+#ifdef CONFIG_PAX_MEMORY_UDEREF
34142+/* PaX: due to the special call convention these functions must
34143+ * - remain leaf functions under all configurations,
34144+ * - never be called directly, only dereferenced from the wrappers.
34145+ */
34146+void __pax_open_userland(void)
34147+{
34148+ unsigned int cpu;
34149+
34150+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34151+ return;
34152+
34153+ cpu = raw_get_cpu();
34154+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34155+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34156+ raw_put_cpu_no_resched();
34157+}
34158+EXPORT_SYMBOL(__pax_open_userland);
34159+
34160+void __pax_close_userland(void)
34161+{
34162+ unsigned int cpu;
34163+
34164+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34165+ return;
34166+
34167+ cpu = raw_get_cpu();
34168+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34169+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34170+ raw_put_cpu_no_resched();
34171+}
34172+EXPORT_SYMBOL(__pax_close_userland);
34173+#endif
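These helpers flip CR3 between the per-CPU kernel and user page directories around every userland access window, and lean on PCID so the switch does not flush the TLB. Illustrative constants matching the usage above (assumed values, they are not defined in this hunk):

    #define PCID_KERNEL  0UL          /* TLB tag for the kernel-view pgd */
    #define PCID_USER    1UL          /* TLB tag for the user-view pgd   */
    #define PCID_NOFLUSH (1UL << 63)  /* CR3 bit 63: keep the target PCID's TLB entries */

    /* opening a userland access window on the current cpu then looks like:
     *   write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH); */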
34174diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34175index 6440221..f746de8 100644
34176--- a/arch/x86/net/bpf_jit.S
34177+++ b/arch/x86/net/bpf_jit.S
34178@@ -9,19 +9,17 @@
34179 */
34180 #include <linux/linkage.h>
34181 #include <asm/dwarf2.h>
34182+#include <asm/alternative-asm.h>
34183
34184 /*
34185 * Calling convention :
34186- * rbx : skb pointer (callee saved)
34187+ * rdi : skb pointer
34188 * esi : offset of byte(s) to fetch in skb (can be scratched)
34189- * r10 : copy of skb->data
34190+ * r8 : copy of skb->data
34191 * r9d : hlen = skb->len - skb->data_len
34192 */
34193-#define SKBDATA %r10
34194+#define SKBDATA %r8
34195 #define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
34196-#define MAX_BPF_STACK (512 /* from filter.h */ + \
34197- 32 /* space for rbx,r13,r14,r15 */ + \
34198- 8 /* space for skb_copy_bits */)
34199
34200 sk_load_word:
34201 .globl sk_load_word
34202@@ -38,6 +36,7 @@ sk_load_word_positive_offset:
34203 jle bpf_slow_path_word
34204 mov (SKBDATA,%rsi),%eax
34205 bswap %eax /* ntohl() */
34206+ pax_force_retaddr
34207 ret
34208
34209 sk_load_half:
34210@@ -55,6 +54,7 @@ sk_load_half_positive_offset:
34211 jle bpf_slow_path_half
34212 movzwl (SKBDATA,%rsi),%eax
34213 rol $8,%ax # ntohs()
34214+ pax_force_retaddr
34215 ret
34216
34217 sk_load_byte:
34218@@ -69,45 +69,83 @@ sk_load_byte_positive_offset:
34219 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34220 jle bpf_slow_path_byte
34221 movzbl (SKBDATA,%rsi),%eax
34222+ pax_force_retaddr
34223+ ret
34224+
34225+/**
34226+ * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
34227+ *
34228+ * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
34229+ * Must preserve A accumulator (%eax)
34230+ * Inputs : %esi is the offset value
34231+ */
34232+sk_load_byte_msh:
34233+ .globl sk_load_byte_msh
34234+ test %esi,%esi
34235+ js bpf_slow_path_byte_msh_neg
34236+
34237+sk_load_byte_msh_positive_offset:
34238+ .globl sk_load_byte_msh_positive_offset
34239+ cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
34240+ jle bpf_slow_path_byte_msh
34241+ movzbl (SKBDATA,%rsi),%ebx
34242+ and $15,%bl
34243+ shl $2,%bl
34244+ pax_force_retaddr
34245 ret
34246
34247 /* rsi contains offset and can be scratched */
34248 #define bpf_slow_path_common(LEN) \
34249- mov %rbx, %rdi; /* arg1 == skb */ \
34250+ push %rdi; /* save skb */ \
34251 push %r9; \
34252 push SKBDATA; \
34253 /* rsi already has offset */ \
34254 mov $LEN,%ecx; /* len */ \
34255- lea - MAX_BPF_STACK + 32(%rbp),%rdx; \
34256+ lea -12(%rbp),%rdx; \
34257 call skb_copy_bits; \
34258 test %eax,%eax; \
34259 pop SKBDATA; \
34260- pop %r9;
34261+ pop %r9; \
34262+ pop %rdi
34263
34264
34265 bpf_slow_path_word:
34266 bpf_slow_path_common(4)
34267 js bpf_error
34268- mov - MAX_BPF_STACK + 32(%rbp),%eax
34269+ mov -12(%rbp),%eax
34270 bswap %eax
34271+ pax_force_retaddr
34272 ret
34273
34274 bpf_slow_path_half:
34275 bpf_slow_path_common(2)
34276 js bpf_error
34277- mov - MAX_BPF_STACK + 32(%rbp),%ax
34278+ mov -12(%rbp),%ax
34279 rol $8,%ax
34280 movzwl %ax,%eax
34281+ pax_force_retaddr
34282 ret
34283
34284 bpf_slow_path_byte:
34285 bpf_slow_path_common(1)
34286 js bpf_error
34287- movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34288+ movzbl -12(%rbp),%eax
34289+ pax_force_retaddr
34290+ ret
34291+
34292+bpf_slow_path_byte_msh:
34293+	xchg	%eax,%ebx /* don't lose A, X is about to be scratched */
34294+ bpf_slow_path_common(1)
34295+ js bpf_error
34296+ movzbl -12(%rbp),%eax
34297+ and $15,%al
34298+ shl $2,%al
34299+ xchg %eax,%ebx
34300+ pax_force_retaddr
34301 ret
34302
34303 #define sk_negative_common(SIZE) \
34304- mov %rbx, %rdi; /* arg1 == skb */ \
34305+ push %rdi; /* save skb */ \
34306 push %r9; \
34307 push SKBDATA; \
34308 /* rsi already has offset */ \
34309@@ -116,8 +154,10 @@ bpf_slow_path_byte:
34310 test %rax,%rax; \
34311 pop SKBDATA; \
34312 pop %r9; \
34313+ pop %rdi; \
34314 jz bpf_error
34315
34316+
34317 bpf_slow_path_word_neg:
34318 cmp SKF_MAX_NEG_OFF, %esi /* test range */
34319 jl bpf_error /* offset lower -> error */
34320@@ -126,6 +166,7 @@ sk_load_word_negative_offset:
34321 sk_negative_common(4)
34322 mov (%rax), %eax
34323 bswap %eax
34324+ pax_force_retaddr
34325 ret
34326
34327 bpf_slow_path_half_neg:
34328@@ -137,6 +178,7 @@ sk_load_half_negative_offset:
34329 mov (%rax),%ax
34330 rol $8,%ax
34331 movzwl %ax,%eax
34332+ pax_force_retaddr
34333 ret
34334
34335 bpf_slow_path_byte_neg:
34336@@ -146,14 +188,27 @@ sk_load_byte_negative_offset:
34337 .globl sk_load_byte_negative_offset
34338 sk_negative_common(1)
34339 movzbl (%rax), %eax
34340+ pax_force_retaddr
34341+ ret
34342+
34343+bpf_slow_path_byte_msh_neg:
34344+ cmp SKF_MAX_NEG_OFF, %esi
34345+ jl bpf_error
34346+sk_load_byte_msh_negative_offset:
34347+ .globl sk_load_byte_msh_negative_offset
34348+	xchg	%eax,%ebx /* don't lose A, X is about to be scratched */
34349+ sk_negative_common(1)
34350+ movzbl (%rax),%eax
34351+ and $15,%al
34352+ shl $2,%al
34353+ xchg %eax,%ebx
34354+ pax_force_retaddr
34355 ret
34356
34357 bpf_error:
34358 # force a return 0 from jit handler
34359- xor %eax,%eax
34360- mov - MAX_BPF_STACK(%rbp),%rbx
34361- mov - MAX_BPF_STACK + 8(%rbp),%r13
34362- mov - MAX_BPF_STACK + 16(%rbp),%r14
34363- mov - MAX_BPF_STACK + 24(%rbp),%r15
34364+ xor %eax,%eax
34365+ mov -8(%rbp),%rbx
34366 leaveq
34367+ pax_force_retaddr
34368 ret
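The restored sk_load_byte_msh helpers implement the classic BPF instruction BPF_S_LDX_B_MSH, which the interpreter defines as X = 4 * (packet[K] & 0x0f): with K pointing at an IPv4 version/IHL byte, that is the header length in bytes. A one-line C equivalent of the fast path:

    static u32 ldx_b_msh(const u8 *pkt, u32 k)
    {
        return 4u * (pkt[k] & 0x0f);    /* low nibble, scaled to bytes */
    }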
34369diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34370index 99bef86..bdfb5c6 100644
34371--- a/arch/x86/net/bpf_jit_comp.c
34372+++ b/arch/x86/net/bpf_jit_comp.c
34373@@ -1,7 +1,6 @@
34374 /* bpf_jit_comp.c : BPF JIT compiler
34375 *
34376 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
34377- * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
34378 *
34379 * This program is free software; you can redistribute it and/or
34380 * modify it under the terms of the GNU General Public License
34381@@ -15,16 +14,28 @@
34382 #include <linux/if_vlan.h>
34383 #include <linux/random.h>
34384
34385+/*
34386+ * Conventions :
34387+ * EAX : BPF A accumulator
34388+ * EBX : BPF X accumulator
34389+ * RDI : pointer to skb (first argument given to JIT function)
34390+ * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
34391+ * ECX,EDX,ESI : scratch registers
34392+ * r9d : skb->len - skb->data_len (headlen)
34393+ * r8 : skb->data
34394+ * -8(RBP) : saved RBX value
34395+ * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
34396+ */
34397 int bpf_jit_enable __read_mostly;
34398
34399 /*
34400 * assembly code in arch/x86/net/bpf_jit.S
34401 */
34402-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
34403+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
34404 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
34405-extern u8 sk_load_byte_positive_offset[];
34406+extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
34407 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
34408-extern u8 sk_load_byte_negative_offset[];
34409+extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
34410
34411 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34412 {
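emit_code(), whose body falls between these hunks, stores up to four bytes of 'bytes' little-endian at 'ptr' and returns ptr + len; every EMIT*() macro below builds on it. A small usage sketch:

    u8 buf[4], *p = buf;

    /* EMIT2(0x31, 0xc0) expands to emit_code(prog, 0x31 + (0xc0 << 8), 2)
     * and appends the bytes {0x31, 0xc0}, i.e. "xor %eax,%eax" */
    p = emit_code(p, 0x31 + (0xc0 << 8), 2);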
34413@@ -39,50 +50,113 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34414 return ptr + len;
34415 }
34416
34417+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34418+#define MAX_INSTR_CODE_SIZE 96
34419+#else
34420+#define MAX_INSTR_CODE_SIZE 64
34421+#endif
34422+
34423 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
34424
34425 #define EMIT1(b1) EMIT(b1, 1)
34426 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
34427 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
34428 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
34429-#define EMIT1_off32(b1, off) \
34430- do {EMIT1(b1); EMIT(off, 4); } while (0)
34431-#define EMIT2_off32(b1, b2, off) \
34432- do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
34433-#define EMIT3_off32(b1, b2, b3, off) \
34434- do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
34435-#define EMIT4_off32(b1, b2, b3, b4, off) \
34436- do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
34437+
34438+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34439+/* original constant will appear in ecx */
34440+#define DILUTE_CONST_SEQUENCE(_off, _key) \
34441+do { \
34442+ /* mov ecx, randkey */ \
34443+ EMIT1(0xb9); \
34444+ EMIT(_key, 4); \
34445+ /* xor ecx, randkey ^ off */ \
34446+ EMIT2(0x81, 0xf1); \
34447+ EMIT((_key) ^ (_off), 4); \
34448+} while (0)
34449+
34450+#define EMIT1_off32(b1, _off) \
34451+do { \
34452+ switch (b1) { \
34453+ case 0x05: /* add eax, imm32 */ \
34454+ case 0x2d: /* sub eax, imm32 */ \
34455+ case 0x25: /* and eax, imm32 */ \
34456+ case 0x0d: /* or eax, imm32 */ \
34457+ case 0xb8: /* mov eax, imm32 */ \
34458+ case 0x35: /* xor eax, imm32 */ \
34459+ case 0x3d: /* cmp eax, imm32 */ \
34460+ case 0xa9: /* test eax, imm32 */ \
34461+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34462+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
34463+ break; \
34464+ case 0xbb: /* mov ebx, imm32 */ \
34465+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34466+ /* mov ebx, ecx */ \
34467+ EMIT2(0x89, 0xcb); \
34468+ break; \
34469+ case 0xbe: /* mov esi, imm32 */ \
34470+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34471+ /* mov esi, ecx */ \
34472+ EMIT2(0x89, 0xce); \
34473+ break; \
34474+ case 0xe8: /* call rel imm32, always to known funcs */ \
34475+ EMIT1(b1); \
34476+ EMIT(_off, 4); \
34477+ break; \
34478+ case 0xe9: /* jmp rel imm32 */ \
34479+ EMIT1(b1); \
34480+ EMIT(_off, 4); \
34481+ /* prevent fall-through, we're not called if off = 0 */ \
34482+ EMIT(0xcccccccc, 4); \
34483+ EMIT(0xcccccccc, 4); \
34484+ break; \
34485+ default: \
34486+ BUILD_BUG(); \
34487+ } \
34488+} while (0)
34489+
34490+#define EMIT2_off32(b1, b2, _off) \
34491+do { \
34492+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
34493+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
34494+ EMIT(randkey, 4); \
34495+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
34496+ EMIT((_off) - randkey, 4); \
34497+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
34498+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34499+ /* imul eax, ecx */ \
34500+ EMIT3(0x0f, 0xaf, 0xc1); \
34501+ } else { \
34502+ BUILD_BUG(); \
34503+ } \
34504+} while (0)
34505+#else
34506+#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
34507+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
34508+#endif
34509+
34510+#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
34511+#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
34512
34513 static inline bool is_imm8(int value)
34514 {
34515 return value <= 127 && value >= -128;
34516 }
34517
34518-static inline bool is_simm32(s64 value)
34519+static inline bool is_near(int offset)
34520 {
34521- return value == (s64) (s32) value;
34522+ return offset <= 127 && offset >= -128;
34523 }
34524
34525-/* mov dst, src */
34526-#define EMIT_mov(DST, SRC) \
34527- do {if (DST != SRC) \
34528- EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
34529- } while (0)
34530-
34531-static int bpf_size_to_x86_bytes(int bpf_size)
34532-{
34533- if (bpf_size == BPF_W)
34534- return 4;
34535- else if (bpf_size == BPF_H)
34536- return 2;
34537- else if (bpf_size == BPF_B)
34538- return 1;
34539- else if (bpf_size == BPF_DW)
34540- return 4; /* imm32 */
34541- else
34542- return 0;
34543-}
34544+#define EMIT_JMP(offset) \
34545+do { \
34546+ if (offset) { \
34547+ if (is_near(offset)) \
34548+ EMIT2(0xeb, offset); /* jmp .+off8 */ \
34549+ else \
34550+ EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \
34551+ } \
34552+} while (0)
34553
34554 /* list of x86 cond jumps opcodes (. + s8)
34555 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
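DILUTE_CONST_SEQUENCE() is the core of CONFIG_GRKERNSEC_JIT_HARDEN: a user who loads a filter chooses its 32-bit constants, and without blinding those constants would appear verbatim in executable memory, making the JIT output a spraying target. Emitting the pair (key, key ^ K) instead guarantees that neither immediate equals K while %ecx still ends up holding K. A host-side illustration of the identity (example values; the real key comes from prandom_u32()):

    #include <assert.h>

    static void blind_demo(void)
    {
        unsigned int key = 0xdeadbeef;  /* example random key */
        unsigned int K   = 0x11223344;  /* attacker-chosen BPF constant */
        unsigned int ecx = key;         /* mov ecx, key     */
        ecx ^= key ^ K;                 /* xor ecx, key ^ K */
        assert(ecx == K);               /* neither emitted immediate was K */
    }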
34556@@ -93,8 +167,46 @@ static int bpf_size_to_x86_bytes(int bpf_size)
34557 #define X86_JNE 0x75
34558 #define X86_JBE 0x76
34559 #define X86_JA 0x77
34560-#define X86_JGE 0x7D
34561-#define X86_JG 0x7F
34562+
34563+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34564+#define APPEND_FLOW_VERIFY() \
34565+do { \
34566+ /* mov ecx, randkey */ \
34567+ EMIT1(0xb9); \
34568+ EMIT(randkey, 4); \
34569+ /* cmp ecx, randkey */ \
34570+ EMIT2(0x81, 0xf9); \
34571+ EMIT(randkey, 4); \
34572+ /* jz after 8 int 3s */ \
34573+ EMIT2(0x74, 0x08); \
34574+ EMIT(0xcccccccc, 4); \
34575+ EMIT(0xcccccccc, 4); \
34576+} while (0)
34577+#else
34578+#define APPEND_FLOW_VERIFY() do { } while (0)
34579+#endif
34580+
34581+#define EMIT_COND_JMP(op, offset) \
34582+do { \
34583+ if (is_near(offset)) \
34584+ EMIT2(op, offset); /* jxx .+off8 */ \
34585+ else { \
34586+ EMIT2(0x0f, op + 0x10); \
34587+ EMIT(offset, 4); /* jxx .+off32 */ \
34588+ APPEND_FLOW_VERIFY(); \
34589+ } \
34590+} while (0)
34591+
34592+#define COND_SEL(CODE, TOP, FOP) \
34593+ case CODE: \
34594+ t_op = TOP; \
34595+ f_op = FOP; \
34596+ goto cond_branch
34597+
34598+
34599+#define SEEN_DATAREF 1 /* might call external helpers */
34600+#define SEEN_XREG 2 /* ebx is used */
34601+#define SEEN_MEM 4 /* use mem[] for temporary storage */
34602
34603 static inline void bpf_flush_icache(void *start, void *end)
34604 {
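APPEND_FLOW_VERIFY() hardens the far-conditional-jump path: execution that arrives out of sync, for instance via a hijacked jump landing inside a blinded immediate, decodes a different instruction stream, fails the compare, and traps on the int3 pad, while straight-line flow always takes the jz. The emitted bytes, using key = 0xdeadbeef as an example:

    const unsigned char flow_verify[] = {
        0xb9, 0xef, 0xbe, 0xad, 0xde,           /* mov  $0xdeadbeef,%ecx */
        0x81, 0xf9, 0xef, 0xbe, 0xad, 0xde,     /* cmp  $0xdeadbeef,%ecx */
        0x74, 0x08,                             /* jz   .+8 (over the traps) */
        0xcc, 0xcc, 0xcc, 0xcc,                 /* int3 x 8 */
        0xcc, 0xcc, 0xcc, 0xcc,
    };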
34605@@ -109,804 +221,646 @@ static inline void bpf_flush_icache(void *start, void *end)
34606 #define CHOOSE_LOAD_FUNC(K, func) \
34607 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
34608
34609-struct bpf_binary_header {
34610- unsigned int pages;
34611- /* Note : for security reasons, bpf code will follow a randomly
34612- * sized amount of int3 instructions
34613- */
34614- u8 image[];
34615-};
34616+/* Helper to find the offset of pkt_type in sk_buff
34617+ * We want to make sure it's still a 3-bit field starting at a byte boundary.
34618+ */
34619+#define PKT_TYPE_MAX 7
34620+static int pkt_type_offset(void)
34621+{
34622+ struct sk_buff skb_probe = {
34623+ .pkt_type = ~0,
34624+ };
34625+ char *ct = (char *)&skb_probe;
34626+ unsigned int off;
34627
34628-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
34629+ for (off = 0; off < sizeof(struct sk_buff); off++) {
34630+ if (ct[off] == PKT_TYPE_MAX)
34631+ return off;
34632+ }
34633+ pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
34634+ return -1;
34635+}
34636+
34637+/* Note : for security reasons, bpf code will follow a randomly
34638+ * sized amount of int3 instructions
34639+ */
34640+static u8 *bpf_alloc_binary(unsigned int proglen,
34641 u8 **image_ptr)
34642 {
34643 unsigned int sz, hole;
34644- struct bpf_binary_header *header;
34645+ u8 *header;
34646
34647 /* Most of BPF filters are really small,
34648 * but if some of them fill a page, allow at least
34649 * 128 extra bytes to insert a random section of int3
34650 */
34651- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
34652- header = module_alloc(sz);
34653+ sz = round_up(proglen + 128, PAGE_SIZE);
34654+ header = module_alloc_exec(sz);
34655 if (!header)
34656 return NULL;
34657
34658+ pax_open_kernel();
34659 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
34660+ pax_close_kernel();
34661
34662- header->pages = sz / PAGE_SIZE;
34663- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
34664+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
34665
34666 /* insert a random number of int3 instructions before BPF code */
34667- *image_ptr = &header->image[prandom_u32() % hole];
34668+ *image_ptr = &header[prandom_u32() % hole];
34669 return header;
34670 }
34671
34672-/* pick a register outside of BPF range for JIT internal work */
34673-#define AUX_REG (MAX_BPF_REG + 1)
34674-
34675-/* the following table maps BPF registers to x64 registers.
34676- * x64 register r12 is unused, since if used as base address register
34677- * in load/store instructions, it always needs an extra byte of encoding
34678- */
34679-static const int reg2hex[] = {
34680- [BPF_REG_0] = 0, /* rax */
34681- [BPF_REG_1] = 7, /* rdi */
34682- [BPF_REG_2] = 6, /* rsi */
34683- [BPF_REG_3] = 2, /* rdx */
34684- [BPF_REG_4] = 1, /* rcx */
34685- [BPF_REG_5] = 0, /* r8 */
34686- [BPF_REG_6] = 3, /* rbx callee saved */
34687- [BPF_REG_7] = 5, /* r13 callee saved */
34688- [BPF_REG_8] = 6, /* r14 callee saved */
34689- [BPF_REG_9] = 7, /* r15 callee saved */
34690- [BPF_REG_FP] = 5, /* rbp readonly */
34691- [AUX_REG] = 3, /* r11 temp register */
34692-};
34693-
34694-/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
34695- * which need extra byte of encoding.
34696- * rax,rcx,...,rbp have simpler encoding
34697- */
34698-static inline bool is_ereg(u32 reg)
34699-{
34700- if (reg == BPF_REG_5 || reg == AUX_REG ||
34701- (reg >= BPF_REG_7 && reg <= BPF_REG_9))
34702- return true;
34703- else
34704- return false;
34705-}
34706-
34707-/* add modifiers if 'reg' maps to x64 registers r8..r15 */
34708-static inline u8 add_1mod(u8 byte, u32 reg)
34709-{
34710- if (is_ereg(reg))
34711- byte |= 1;
34712- return byte;
34713-}
34714-
34715-static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
34716-{
34717- if (is_ereg(r1))
34718- byte |= 1;
34719- if (is_ereg(r2))
34720- byte |= 4;
34721- return byte;
34722-}
34723-
34724-/* encode 'dst_reg' register into x64 opcode 'byte' */
34725-static inline u8 add_1reg(u8 byte, u32 dst_reg)
34726-{
34727- return byte + reg2hex[dst_reg];
34728-}
34729-
34730-/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
34731-static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34732-{
34733- return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
34734-}
34735-
34736-struct jit_context {
34737- unsigned int cleanup_addr; /* epilogue code offset */
34738- bool seen_ld_abs;
34739-};
34740-
34741-static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
34742- int oldproglen, struct jit_context *ctx)
34743-{
34744- struct sock_filter_int *insn = bpf_prog->insnsi;
34745- int insn_cnt = bpf_prog->len;
34746- u8 temp[64];
34747- int i;
34748- int proglen = 0;
34749- u8 *prog = temp;
34750- int stacksize = MAX_BPF_STACK +
34751- 32 /* space for rbx, r13, r14, r15 */ +
34752- 8 /* space for skb_copy_bits() buffer */;
34753-
34754- EMIT1(0x55); /* push rbp */
34755- EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
34756-
34757- /* sub rsp, stacksize */
34758- EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
34759-
34760- /* all classic BPF filters use R6(rbx) save it */
34761-
34762- /* mov qword ptr [rbp-X],rbx */
34763- EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
34764-
34765- /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
34766- * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
34767- * R8(r14). R9(r15) spill could be made conditional, but there is only
34768- * one 'bpf_error' return path out of helper functions inside bpf_jit.S
34769- * The overhead of extra spill is negligible for any filter other
34770- * than synthetic ones. Therefore not worth adding complexity.
34771- */
34772-
34773- /* mov qword ptr [rbp-X],r13 */
34774- EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
34775- /* mov qword ptr [rbp-X],r14 */
34776- EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
34777- /* mov qword ptr [rbp-X],r15 */
34778- EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
34779-
34780- /* clear A and X registers */
34781- EMIT2(0x31, 0xc0); /* xor eax, eax */
34782- EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
34783-
34784- if (ctx->seen_ld_abs) {
34785- /* r9d : skb->len - skb->data_len (headlen)
34786- * r10 : skb->data
34787- */
34788- if (is_imm8(offsetof(struct sk_buff, len)))
34789- /* mov %r9d, off8(%rdi) */
34790- EMIT4(0x44, 0x8b, 0x4f,
34791- offsetof(struct sk_buff, len));
34792- else
34793- /* mov %r9d, off32(%rdi) */
34794- EMIT3_off32(0x44, 0x8b, 0x8f,
34795- offsetof(struct sk_buff, len));
34796-
34797- if (is_imm8(offsetof(struct sk_buff, data_len)))
34798- /* sub %r9d, off8(%rdi) */
34799- EMIT4(0x44, 0x2b, 0x4f,
34800- offsetof(struct sk_buff, data_len));
34801- else
34802- EMIT3_off32(0x44, 0x2b, 0x8f,
34803- offsetof(struct sk_buff, data_len));
34804-
34805- if (is_imm8(offsetof(struct sk_buff, data)))
34806- /* mov %r10, off8(%rdi) */
34807- EMIT4(0x4c, 0x8b, 0x57,
34808- offsetof(struct sk_buff, data));
34809- else
34810- /* mov %r10, off32(%rdi) */
34811- EMIT3_off32(0x4c, 0x8b, 0x97,
34812- offsetof(struct sk_buff, data));
34813- }
34814-
34815- for (i = 0; i < insn_cnt; i++, insn++) {
34816- const s32 imm32 = insn->imm;
34817- u32 dst_reg = insn->dst_reg;
34818- u32 src_reg = insn->src_reg;
34819- u8 b1 = 0, b2 = 0, b3 = 0;
34820- s64 jmp_offset;
34821- u8 jmp_cond;
34822- int ilen;
34823- u8 *func;
34824-
34825- switch (insn->code) {
34826- /* ALU */
34827- case BPF_ALU | BPF_ADD | BPF_X:
34828- case BPF_ALU | BPF_SUB | BPF_X:
34829- case BPF_ALU | BPF_AND | BPF_X:
34830- case BPF_ALU | BPF_OR | BPF_X:
34831- case BPF_ALU | BPF_XOR | BPF_X:
34832- case BPF_ALU64 | BPF_ADD | BPF_X:
34833- case BPF_ALU64 | BPF_SUB | BPF_X:
34834- case BPF_ALU64 | BPF_AND | BPF_X:
34835- case BPF_ALU64 | BPF_OR | BPF_X:
34836- case BPF_ALU64 | BPF_XOR | BPF_X:
34837- switch (BPF_OP(insn->code)) {
34838- case BPF_ADD: b2 = 0x01; break;
34839- case BPF_SUB: b2 = 0x29; break;
34840- case BPF_AND: b2 = 0x21; break;
34841- case BPF_OR: b2 = 0x09; break;
34842- case BPF_XOR: b2 = 0x31; break;
34843- }
34844- if (BPF_CLASS(insn->code) == BPF_ALU64)
34845- EMIT1(add_2mod(0x48, dst_reg, src_reg));
34846- else if (is_ereg(dst_reg) || is_ereg(src_reg))
34847- EMIT1(add_2mod(0x40, dst_reg, src_reg));
34848- EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
34849- break;
34850-
34851- /* mov dst, src */
34852- case BPF_ALU64 | BPF_MOV | BPF_X:
34853- EMIT_mov(dst_reg, src_reg);
34854- break;
34855-
34856- /* mov32 dst, src */
34857- case BPF_ALU | BPF_MOV | BPF_X:
34858- if (is_ereg(dst_reg) || is_ereg(src_reg))
34859- EMIT1(add_2mod(0x40, dst_reg, src_reg));
34860- EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
34861- break;
34862-
34863- /* neg dst */
34864- case BPF_ALU | BPF_NEG:
34865- case BPF_ALU64 | BPF_NEG:
34866- if (BPF_CLASS(insn->code) == BPF_ALU64)
34867- EMIT1(add_1mod(0x48, dst_reg));
34868- else if (is_ereg(dst_reg))
34869- EMIT1(add_1mod(0x40, dst_reg));
34870- EMIT2(0xF7, add_1reg(0xD8, dst_reg));
34871- break;
34872-
34873- case BPF_ALU | BPF_ADD | BPF_K:
34874- case BPF_ALU | BPF_SUB | BPF_K:
34875- case BPF_ALU | BPF_AND | BPF_K:
34876- case BPF_ALU | BPF_OR | BPF_K:
34877- case BPF_ALU | BPF_XOR | BPF_K:
34878- case BPF_ALU64 | BPF_ADD | BPF_K:
34879- case BPF_ALU64 | BPF_SUB | BPF_K:
34880- case BPF_ALU64 | BPF_AND | BPF_K:
34881- case BPF_ALU64 | BPF_OR | BPF_K:
34882- case BPF_ALU64 | BPF_XOR | BPF_K:
34883- if (BPF_CLASS(insn->code) == BPF_ALU64)
34884- EMIT1(add_1mod(0x48, dst_reg));
34885- else if (is_ereg(dst_reg))
34886- EMIT1(add_1mod(0x40, dst_reg));
34887-
34888- switch (BPF_OP(insn->code)) {
34889- case BPF_ADD: b3 = 0xC0; break;
34890- case BPF_SUB: b3 = 0xE8; break;
34891- case BPF_AND: b3 = 0xE0; break;
34892- case BPF_OR: b3 = 0xC8; break;
34893- case BPF_XOR: b3 = 0xF0; break;
34894- }
34895-
34896- if (is_imm8(imm32))
34897- EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
34898- else
34899- EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
34900- break;
34901-
34902- case BPF_ALU64 | BPF_MOV | BPF_K:
34903- /* optimization: if imm32 is positive,
34904- * use 'mov eax, imm32' (which zero-extends imm32)
34905- * to save 2 bytes
34906- */
34907- if (imm32 < 0) {
34908- /* 'mov rax, imm32' sign extends imm32 */
34909- b1 = add_1mod(0x48, dst_reg);
34910- b2 = 0xC7;
34911- b3 = 0xC0;
34912- EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
34913- break;
34914- }
34915-
34916- case BPF_ALU | BPF_MOV | BPF_K:
34917- /* mov %eax, imm32 */
34918- if (is_ereg(dst_reg))
34919- EMIT1(add_1mod(0x40, dst_reg));
34920- EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
34921- break;
34922-
34923- /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
34924- case BPF_ALU | BPF_MOD | BPF_X:
34925- case BPF_ALU | BPF_DIV | BPF_X:
34926- case BPF_ALU | BPF_MOD | BPF_K:
34927- case BPF_ALU | BPF_DIV | BPF_K:
34928- case BPF_ALU64 | BPF_MOD | BPF_X:
34929- case BPF_ALU64 | BPF_DIV | BPF_X:
34930- case BPF_ALU64 | BPF_MOD | BPF_K:
34931- case BPF_ALU64 | BPF_DIV | BPF_K:
34932- EMIT1(0x50); /* push rax */
34933- EMIT1(0x52); /* push rdx */
34934-
34935- if (BPF_SRC(insn->code) == BPF_X)
34936- /* mov r11, src_reg */
34937- EMIT_mov(AUX_REG, src_reg);
34938- else
34939- /* mov r11, imm32 */
34940- EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
34941-
34942- /* mov rax, dst_reg */
34943- EMIT_mov(BPF_REG_0, dst_reg);
34944-
34945- /* xor edx, edx
34946- * equivalent to 'xor rdx, rdx', but one byte less
34947- */
34948- EMIT2(0x31, 0xd2);
34949-
34950- if (BPF_SRC(insn->code) == BPF_X) {
34951- /* if (src_reg == 0) return 0 */
34952-
34953- /* cmp r11, 0 */
34954- EMIT4(0x49, 0x83, 0xFB, 0x00);
34955-
34956- /* jne .+9 (skip over pop, pop, xor and jmp) */
34957- EMIT2(X86_JNE, 1 + 1 + 2 + 5);
34958- EMIT1(0x5A); /* pop rdx */
34959- EMIT1(0x58); /* pop rax */
34960- EMIT2(0x31, 0xc0); /* xor eax, eax */
34961-
34962- /* jmp cleanup_addr
34963- * addrs[i] - 11, because there are 11 bytes
34964- * after this insn: div, mov, pop, pop, mov
34965- */
34966- jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
34967- EMIT1_off32(0xE9, jmp_offset);
34968- }
34969-
34970- if (BPF_CLASS(insn->code) == BPF_ALU64)
34971- /* div r11 */
34972- EMIT3(0x49, 0xF7, 0xF3);
34973- else
34974- /* div r11d */
34975- EMIT3(0x41, 0xF7, 0xF3);
34976-
34977- if (BPF_OP(insn->code) == BPF_MOD)
34978- /* mov r11, rdx */
34979- EMIT3(0x49, 0x89, 0xD3);
34980- else
34981- /* mov r11, rax */
34982- EMIT3(0x49, 0x89, 0xC3);
34983-
34984- EMIT1(0x5A); /* pop rdx */
34985- EMIT1(0x58); /* pop rax */
34986-
34987- /* mov dst_reg, r11 */
34988- EMIT_mov(dst_reg, AUX_REG);
34989- break;
34990-
34991- case BPF_ALU | BPF_MUL | BPF_K:
34992- case BPF_ALU | BPF_MUL | BPF_X:
34993- case BPF_ALU64 | BPF_MUL | BPF_K:
34994- case BPF_ALU64 | BPF_MUL | BPF_X:
34995- EMIT1(0x50); /* push rax */
34996- EMIT1(0x52); /* push rdx */
34997-
34998- /* mov r11, dst_reg */
34999- EMIT_mov(AUX_REG, dst_reg);
35000-
35001- if (BPF_SRC(insn->code) == BPF_X)
35002- /* mov rax, src_reg */
35003- EMIT_mov(BPF_REG_0, src_reg);
35004- else
35005- /* mov rax, imm32 */
35006- EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
35007-
35008- if (BPF_CLASS(insn->code) == BPF_ALU64)
35009- EMIT1(add_1mod(0x48, AUX_REG));
35010- else if (is_ereg(AUX_REG))
35011- EMIT1(add_1mod(0x40, AUX_REG));
35012- /* mul(q) r11 */
35013- EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
35014-
35015- /* mov r11, rax */
35016- EMIT_mov(AUX_REG, BPF_REG_0);
35017-
35018- EMIT1(0x5A); /* pop rdx */
35019- EMIT1(0x58); /* pop rax */
35020-
35021- /* mov dst_reg, r11 */
35022- EMIT_mov(dst_reg, AUX_REG);
35023- break;
35024-
35025- /* shifts */
35026- case BPF_ALU | BPF_LSH | BPF_K:
35027- case BPF_ALU | BPF_RSH | BPF_K:
35028- case BPF_ALU | BPF_ARSH | BPF_K:
35029- case BPF_ALU64 | BPF_LSH | BPF_K:
35030- case BPF_ALU64 | BPF_RSH | BPF_K:
35031- case BPF_ALU64 | BPF_ARSH | BPF_K:
35032- if (BPF_CLASS(insn->code) == BPF_ALU64)
35033- EMIT1(add_1mod(0x48, dst_reg));
35034- else if (is_ereg(dst_reg))
35035- EMIT1(add_1mod(0x40, dst_reg));
35036-
35037- switch (BPF_OP(insn->code)) {
35038- case BPF_LSH: b3 = 0xE0; break;
35039- case BPF_RSH: b3 = 0xE8; break;
35040- case BPF_ARSH: b3 = 0xF8; break;
35041- }
35042- EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
35043- break;
35044-
35045- case BPF_ALU | BPF_END | BPF_FROM_BE:
35046- switch (imm32) {
35047- case 16:
35048- /* emit 'ror %ax, 8' to swap lower 2 bytes */
35049- EMIT1(0x66);
35050- if (is_ereg(dst_reg))
35051- EMIT1(0x41);
35052- EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
35053- break;
35054- case 32:
35055- /* emit 'bswap eax' to swap lower 4 bytes */
35056- if (is_ereg(dst_reg))
35057- EMIT2(0x41, 0x0F);
35058- else
35059- EMIT1(0x0F);
35060- EMIT1(add_1reg(0xC8, dst_reg));
35061- break;
35062- case 64:
35063- /* emit 'bswap rax' to swap 8 bytes */
35064- EMIT3(add_1mod(0x48, dst_reg), 0x0F,
35065- add_1reg(0xC8, dst_reg));
35066- break;
35067- }
35068- break;
35069-
35070- case BPF_ALU | BPF_END | BPF_FROM_LE:
35071- break;
35072-
35073- /* ST: *(u8*)(dst_reg + off) = imm */
35074- case BPF_ST | BPF_MEM | BPF_B:
35075- if (is_ereg(dst_reg))
35076- EMIT2(0x41, 0xC6);
35077- else
35078- EMIT1(0xC6);
35079- goto st;
35080- case BPF_ST | BPF_MEM | BPF_H:
35081- if (is_ereg(dst_reg))
35082- EMIT3(0x66, 0x41, 0xC7);
35083- else
35084- EMIT2(0x66, 0xC7);
35085- goto st;
35086- case BPF_ST | BPF_MEM | BPF_W:
35087- if (is_ereg(dst_reg))
35088- EMIT2(0x41, 0xC7);
35089- else
35090- EMIT1(0xC7);
35091- goto st;
35092- case BPF_ST | BPF_MEM | BPF_DW:
35093- EMIT2(add_1mod(0x48, dst_reg), 0xC7);
35094-
35095-st: if (is_imm8(insn->off))
35096- EMIT2(add_1reg(0x40, dst_reg), insn->off);
35097- else
35098- EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
35099-
35100- EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
35101- break;
35102-
35103- /* STX: *(u8*)(dst_reg + off) = src_reg */
35104- case BPF_STX | BPF_MEM | BPF_B:
35105- /* emit 'mov byte ptr [rax + off], al' */
35106- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
35107- /* have to add extra byte for x86 SIL, DIL regs */
35108- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
35109- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
35110- else
35111- EMIT1(0x88);
35112- goto stx;
35113- case BPF_STX | BPF_MEM | BPF_H:
35114- if (is_ereg(dst_reg) || is_ereg(src_reg))
35115- EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
35116- else
35117- EMIT2(0x66, 0x89);
35118- goto stx;
35119- case BPF_STX | BPF_MEM | BPF_W:
35120- if (is_ereg(dst_reg) || is_ereg(src_reg))
35121- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
35122- else
35123- EMIT1(0x89);
35124- goto stx;
35125- case BPF_STX | BPF_MEM | BPF_DW:
35126- EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
35127-stx: if (is_imm8(insn->off))
35128- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
35129- else
35130- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
35131- insn->off);
35132- break;
35133-
35134- /* LDX: dst_reg = *(u8*)(src_reg + off) */
35135- case BPF_LDX | BPF_MEM | BPF_B:
35136- /* emit 'movzx rax, byte ptr [rax + off]' */
35137- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
35138- goto ldx;
35139- case BPF_LDX | BPF_MEM | BPF_H:
35140- /* emit 'movzx rax, word ptr [rax + off]' */
35141- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
35142- goto ldx;
35143- case BPF_LDX | BPF_MEM | BPF_W:
35144- /* emit 'mov eax, dword ptr [rax+0x14]' */
35145- if (is_ereg(dst_reg) || is_ereg(src_reg))
35146- EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
35147- else
35148- EMIT1(0x8B);
35149- goto ldx;
35150- case BPF_LDX | BPF_MEM | BPF_DW:
35151- /* emit 'mov rax, qword ptr [rax+0x14]' */
35152- EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
35153-ldx: /* if insn->off == 0 we can save one extra byte, but
35154- * special case of x86 r13 which always needs an offset
35155- * is not worth the hassle
35156- */
35157- if (is_imm8(insn->off))
35158- EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
35159- else
35160- EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
35161- insn->off);
35162- break;
35163-
35164- /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
35165- case BPF_STX | BPF_XADD | BPF_W:
35166- /* emit 'lock add dword ptr [rax + off], eax' */
35167- if (is_ereg(dst_reg) || is_ereg(src_reg))
35168- EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
35169- else
35170- EMIT2(0xF0, 0x01);
35171- goto xadd;
35172- case BPF_STX | BPF_XADD | BPF_DW:
35173- EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
35174-xadd: if (is_imm8(insn->off))
35175- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
35176- else
35177- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
35178- insn->off);
35179- break;
35180-
35181- /* call */
35182- case BPF_JMP | BPF_CALL:
35183- func = (u8 *) __bpf_call_base + imm32;
35184- jmp_offset = func - (image + addrs[i]);
35185- if (ctx->seen_ld_abs) {
35186- EMIT2(0x41, 0x52); /* push %r10 */
35187- EMIT2(0x41, 0x51); /* push %r9 */
35188- /* need to adjust jmp offset, since
35189- * pop %r9, pop %r10 take 4 bytes after call insn
35190- */
35191- jmp_offset += 4;
35192- }
35193- if (!imm32 || !is_simm32(jmp_offset)) {
35194- pr_err("unsupported bpf func %d addr %p image %p\n",
35195- imm32, func, image);
35196- return -EINVAL;
35197- }
35198- EMIT1_off32(0xE8, jmp_offset);
35199- if (ctx->seen_ld_abs) {
35200- EMIT2(0x41, 0x59); /* pop %r9 */
35201- EMIT2(0x41, 0x5A); /* pop %r10 */
35202- }
35203- break;
35204-
35205- /* cond jump */
35206- case BPF_JMP | BPF_JEQ | BPF_X:
35207- case BPF_JMP | BPF_JNE | BPF_X:
35208- case BPF_JMP | BPF_JGT | BPF_X:
35209- case BPF_JMP | BPF_JGE | BPF_X:
35210- case BPF_JMP | BPF_JSGT | BPF_X:
35211- case BPF_JMP | BPF_JSGE | BPF_X:
35212- /* cmp dst_reg, src_reg */
35213- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
35214- add_2reg(0xC0, dst_reg, src_reg));
35215- goto emit_cond_jmp;
35216-
35217- case BPF_JMP | BPF_JSET | BPF_X:
35218- /* test dst_reg, src_reg */
35219- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
35220- add_2reg(0xC0, dst_reg, src_reg));
35221- goto emit_cond_jmp;
35222-
35223- case BPF_JMP | BPF_JSET | BPF_K:
35224- /* test dst_reg, imm32 */
35225- EMIT1(add_1mod(0x48, dst_reg));
35226- EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
35227- goto emit_cond_jmp;
35228-
35229- case BPF_JMP | BPF_JEQ | BPF_K:
35230- case BPF_JMP | BPF_JNE | BPF_K:
35231- case BPF_JMP | BPF_JGT | BPF_K:
35232- case BPF_JMP | BPF_JGE | BPF_K:
35233- case BPF_JMP | BPF_JSGT | BPF_K:
35234- case BPF_JMP | BPF_JSGE | BPF_K:
35235- /* cmp dst_reg, imm8/32 */
35236- EMIT1(add_1mod(0x48, dst_reg));
35237-
35238- if (is_imm8(imm32))
35239- EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
35240- else
35241- EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
35242-
35243-emit_cond_jmp: /* convert BPF opcode to x86 */
35244- switch (BPF_OP(insn->code)) {
35245- case BPF_JEQ:
35246- jmp_cond = X86_JE;
35247- break;
35248- case BPF_JSET:
35249- case BPF_JNE:
35250- jmp_cond = X86_JNE;
35251- break;
35252- case BPF_JGT:
35253- /* GT is unsigned '>', JA in x86 */
35254- jmp_cond = X86_JA;
35255- break;
35256- case BPF_JGE:
35257- /* GE is unsigned '>=', JAE in x86 */
35258- jmp_cond = X86_JAE;
35259- break;
35260- case BPF_JSGT:
35261- /* signed '>', GT in x86 */
35262- jmp_cond = X86_JG;
35263- break;
35264- case BPF_JSGE:
35265- /* signed '>=', GE in x86 */
35266- jmp_cond = X86_JGE;
35267- break;
35268- default: /* to silence gcc warning */
35269- return -EFAULT;
35270- }
35271- jmp_offset = addrs[i + insn->off] - addrs[i];
35272- if (is_imm8(jmp_offset)) {
35273- EMIT2(jmp_cond, jmp_offset);
35274- } else if (is_simm32(jmp_offset)) {
35275- EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
35276- } else {
35277- pr_err("cond_jmp gen bug %llx\n", jmp_offset);
35278- return -EFAULT;
35279- }
35280-
35281- break;
35282-
35283- case BPF_JMP | BPF_JA:
35284- jmp_offset = addrs[i + insn->off] - addrs[i];
35285- if (!jmp_offset)
35286- /* optimize out nop jumps */
35287- break;
35288-emit_jmp:
35289- if (is_imm8(jmp_offset)) {
35290- EMIT2(0xEB, jmp_offset);
35291- } else if (is_simm32(jmp_offset)) {
35292- EMIT1_off32(0xE9, jmp_offset);
35293- } else {
35294- pr_err("jmp gen bug %llx\n", jmp_offset);
35295- return -EFAULT;
35296- }
35297- break;
35298-
35299- case BPF_LD | BPF_IND | BPF_W:
35300- func = sk_load_word;
35301- goto common_load;
35302- case BPF_LD | BPF_ABS | BPF_W:
35303- func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
35304-common_load: ctx->seen_ld_abs = true;
35305- jmp_offset = func - (image + addrs[i]);
35306- if (!func || !is_simm32(jmp_offset)) {
35307- pr_err("unsupported bpf func %d addr %p image %p\n",
35308- imm32, func, image);
35309- return -EINVAL;
35310- }
35311- if (BPF_MODE(insn->code) == BPF_ABS) {
35312- /* mov %esi, imm32 */
35313- EMIT1_off32(0xBE, imm32);
35314- } else {
35315- /* mov %rsi, src_reg */
35316- EMIT_mov(BPF_REG_2, src_reg);
35317- if (imm32) {
35318- if (is_imm8(imm32))
35319- /* add %esi, imm8 */
35320- EMIT3(0x83, 0xC6, imm32);
35321- else
35322- /* add %esi, imm32 */
35323- EMIT2_off32(0x81, 0xC6, imm32);
35324- }
35325- }
35326- /* skb pointer is in R6 (%rbx), it will be copied into
35327- * %rdi if skb_copy_bits() call is necessary.
35328- * sk_load_* helpers also use %r10 and %r9d.
35329- * See bpf_jit.S
35330- */
35331- EMIT1_off32(0xE8, jmp_offset); /* call */
35332- break;
35333-
35334- case BPF_LD | BPF_IND | BPF_H:
35335- func = sk_load_half;
35336- goto common_load;
35337- case BPF_LD | BPF_ABS | BPF_H:
35338- func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
35339- goto common_load;
35340- case BPF_LD | BPF_IND | BPF_B:
35341- func = sk_load_byte;
35342- goto common_load;
35343- case BPF_LD | BPF_ABS | BPF_B:
35344- func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
35345- goto common_load;
35346-
35347- case BPF_JMP | BPF_EXIT:
35348- if (i != insn_cnt - 1) {
35349- jmp_offset = ctx->cleanup_addr - addrs[i];
35350- goto emit_jmp;
35351- }
35352- /* update cleanup_addr */
35353- ctx->cleanup_addr = proglen;
35354- /* mov rbx, qword ptr [rbp-X] */
35355- EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
35356- /* mov r13, qword ptr [rbp-X] */
35357- EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
35358- /* mov r14, qword ptr [rbp-X] */
35359- EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
35360- /* mov r15, qword ptr [rbp-X] */
35361- EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
35362-
35363- EMIT1(0xC9); /* leave */
35364- EMIT1(0xC3); /* ret */
35365- break;
35366-
35367- default:
35368- /* By design x64 JIT should support all BPF instructions
35369- * This error will be seen if new instruction was added
35370- * to interpreter, but not to JIT
35371- * or if there is junk in sk_filter
35372- */
35373- pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
35374- return -EINVAL;
35375- }
35376-
35377- ilen = prog - temp;
35378- if (image) {
35379- if (unlikely(proglen + ilen > oldproglen)) {
35380- pr_err("bpf_jit_compile fatal error\n");
35381- return -EFAULT;
35382- }
35383- memcpy(image + proglen, temp, ilen);
35384- }
35385- proglen += ilen;
35386- addrs[i] = proglen;
35387- prog = temp;
35388- }
35389- return proglen;
35390-}
35391-
35392-void bpf_jit_compile(struct sk_filter *prog)
35393-{
35394-}
35395-
35396-void bpf_int_jit_compile(struct sk_filter *prog)
35397-{
35398- struct bpf_binary_header *header = NULL;
35399- int proglen, oldproglen = 0;
35400- struct jit_context ctx = {};
35401+void bpf_jit_compile(struct sk_filter *fp)
35402+{
35403+ u8 temp[MAX_INSTR_CODE_SIZE];
35404+ u8 *prog;
35405+ unsigned int proglen, oldproglen = 0;
35406+ int ilen, i;
35407+ int t_offset, f_offset;
35408+ u8 t_op, f_op, seen = 0, pass;
35409 u8 *image = NULL;
35410- int *addrs;
35411- int pass;
35412- int i;
35413+ u8 *header = NULL;
35414+ u8 *func;
35415+ int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
35416+ unsigned int cleanup_addr; /* epilogue code offset */
35417+ unsigned int *addrs;
35418+ const struct sock_filter *filter = fp->insns;
35419+ int flen = fp->len;
35420+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35421+ unsigned int randkey;
35422+#endif
35423
35424 if (!bpf_jit_enable)
35425 return;
35426
35427- if (!prog || !prog->len)
35428- return;
35429-
35430- addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
35431- if (!addrs)
35432+ addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
35433+ if (addrs == NULL)
35434 return;
35435
35436 /* Before first pass, make a rough estimation of addrs[]
35437- * each bpf instruction is translated to less than 64 bytes
35438+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
35439 */
35440- for (proglen = 0, i = 0; i < prog->len; i++) {
35441- proglen += 64;
35442+ for (proglen = 0, i = 0; i < flen; i++) {
35443+ proglen += MAX_INSTR_CODE_SIZE;
35444 addrs[i] = proglen;
35445 }
35446- ctx.cleanup_addr = proglen;
35447+ cleanup_addr = proglen; /* epilogue address */
35448
35449 for (pass = 0; pass < 10; pass++) {
35450- proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
35451- if (proglen <= 0) {
35452- image = NULL;
35453- if (header)
35454- module_free(NULL, header);
35455- goto out;
35456+ u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
35457+ /* no prologue/epilogue for trivial filters (RET something) */
35458+ proglen = 0;
35459+ prog = temp;
35460+
35461+ if (seen_or_pass0) {
35462+ EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
35463+ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
35464+ /* note : must save %rbx in case bpf_error is hit */
35465+ if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
35466+ EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
35467+ if (seen_or_pass0 & SEEN_XREG)
35468+				CLEAR_X(); /* make sure we don't leak kernel memory */
35469+
35470+ /*
35471+ * If this filter needs to access skb data,
35472+			 * load r9 and r8 with:
35473+ * r9 = skb->len - skb->data_len
35474+ * r8 = skb->data
35475+ */
35476+ if (seen_or_pass0 & SEEN_DATAREF) {
35477+ if (offsetof(struct sk_buff, len) <= 127)
35478+ /* mov off8(%rdi),%r9d */
35479+ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
35480+ else {
35481+ /* mov off32(%rdi),%r9d */
35482+ EMIT3(0x44, 0x8b, 0x8f);
35483+ EMIT(offsetof(struct sk_buff, len), 4);
35484+ }
35485+ if (is_imm8(offsetof(struct sk_buff, data_len)))
35486+ /* sub off8(%rdi),%r9d */
35487+ EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
35488+ else {
35489+ EMIT3(0x44, 0x2b, 0x8f);
35490+ EMIT(offsetof(struct sk_buff, data_len), 4);
35491+ }
35492+
35493+ if (is_imm8(offsetof(struct sk_buff, data)))
35494+ /* mov off8(%rdi),%r8 */
35495+ EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
35496+ else {
35497+ /* mov off32(%rdi),%r8 */
35498+ EMIT3(0x4c, 0x8b, 0x87);
35499+ EMIT(offsetof(struct sk_buff, data), 4);
35500+ }
35501+ }
35502 }
35503+
35504+ switch (filter[0].code) {
35505+ case BPF_S_RET_K:
35506+ case BPF_S_LD_W_LEN:
35507+ case BPF_S_ANC_PROTOCOL:
35508+ case BPF_S_ANC_IFINDEX:
35509+ case BPF_S_ANC_MARK:
35510+ case BPF_S_ANC_RXHASH:
35511+ case BPF_S_ANC_CPU:
35512+ case BPF_S_ANC_VLAN_TAG:
35513+ case BPF_S_ANC_VLAN_TAG_PRESENT:
35514+ case BPF_S_ANC_QUEUE:
35515+ case BPF_S_ANC_PKTTYPE:
35516+ case BPF_S_LD_W_ABS:
35517+ case BPF_S_LD_H_ABS:
35518+ case BPF_S_LD_B_ABS:
35519+ /* first instruction sets A register (or is RET 'constant') */
35520+ break;
35521+ default:
35522+			/* make sure we don't leak kernel information to userland */
35523+ CLEAR_A(); /* A = 0 */
35524+ }
35525+
35526+ for (i = 0; i < flen; i++) {
35527+ unsigned int K = filter[i].k;
35528+
35529+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35530+ randkey = prandom_u32();
35531+#endif
35532+
35533+ switch (filter[i].code) {
35534+ case BPF_S_ALU_ADD_X: /* A += X; */
35535+ seen |= SEEN_XREG;
35536+ EMIT2(0x01, 0xd8); /* add %ebx,%eax */
35537+ break;
35538+ case BPF_S_ALU_ADD_K: /* A += K; */
35539+ if (!K)
35540+ break;
35541+ if (is_imm8(K))
35542+ EMIT3(0x83, 0xc0, K); /* add imm8,%eax */
35543+ else
35544+ EMIT1_off32(0x05, K); /* add imm32,%eax */
35545+ break;
35546+ case BPF_S_ALU_SUB_X: /* A -= X; */
35547+ seen |= SEEN_XREG;
35548+ EMIT2(0x29, 0xd8); /* sub %ebx,%eax */
35549+ break;
35550+ case BPF_S_ALU_SUB_K: /* A -= K */
35551+ if (!K)
35552+ break;
35553+ if (is_imm8(K))
35554+ EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
35555+ else
35556+ EMIT1_off32(0x2d, K); /* sub imm32,%eax */
35557+ break;
35558+ case BPF_S_ALU_MUL_X: /* A *= X; */
35559+ seen |= SEEN_XREG;
35560+ EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */
35561+ break;
35562+ case BPF_S_ALU_MUL_K: /* A *= K */
35563+ if (is_imm8(K))
35564+ EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
35565+ else
35566+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
35567+ break;
35568+ case BPF_S_ALU_DIV_X: /* A /= X; */
35569+ seen |= SEEN_XREG;
35570+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
35571+ if (pc_ret0 > 0) {
35572+ /* addrs[pc_ret0 - 1] is start address of target
35573+ * (addrs[i] - 4) is the address following this jmp
35574+ * ("xor %edx,%edx; div %ebx" being 4 bytes long)
35575+ */
35576+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
35577+ (addrs[i] - 4));
35578+ } else {
35579+ EMIT_COND_JMP(X86_JNE, 2 + 5);
35580+ CLEAR_A();
35581+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
35582+ }
35583+ EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
35584+ break;
35585+ case BPF_S_ALU_MOD_X: /* A %= X; */
35586+ seen |= SEEN_XREG;
35587+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
35588+ if (pc_ret0 > 0) {
35589+ /* addrs[pc_ret0 - 1] is start address of target
35590+ * (addrs[i] - 6) is the address following this jmp
35591+ * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
35592+ */
35593+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
35594+ (addrs[i] - 6));
35595+ } else {
35596+ EMIT_COND_JMP(X86_JNE, 2 + 5);
35597+ CLEAR_A();
35598+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
35599+ }
35600+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35601+ EMIT2(0xf7, 0xf3); /* div %ebx */
35602+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
35603+ break;
35604+ case BPF_S_ALU_MOD_K: /* A %= K; */
35605+ if (K == 1) {
35606+ CLEAR_A();
35607+ break;
35608+ }
35609+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35610+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35611+ DILUTE_CONST_SEQUENCE(K, randkey);
35612+#else
35613+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
35614+#endif
35615+ EMIT2(0xf7, 0xf1); /* div %ecx */
35616+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
35617+ break;
35618+ case BPF_S_ALU_DIV_K: /* A /= K */
35619+ if (K == 1)
35620+ break;
35621+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35622+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35623+ DILUTE_CONST_SEQUENCE(K, randkey);
35624+#else
35625+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
35626+#endif
35627+ EMIT2(0xf7, 0xf1); /* div %ecx */
35628+ break;
35629+ case BPF_S_ALU_AND_X:
35630+ seen |= SEEN_XREG;
35631+ EMIT2(0x21, 0xd8); /* and %ebx,%eax */
35632+ break;
35633+ case BPF_S_ALU_AND_K:
35634+ if (K >= 0xFFFFFF00) {
35635+ EMIT2(0x24, K & 0xFF); /* and imm8,%al */
35636+ } else if (K >= 0xFFFF0000) {
35637+ EMIT2(0x66, 0x25); /* and imm16,%ax */
35638+ EMIT(K, 2);
35639+ } else {
35640+ EMIT1_off32(0x25, K); /* and imm32,%eax */
35641+ }
35642+ break;
35643+ case BPF_S_ALU_OR_X:
35644+ seen |= SEEN_XREG;
35645+ EMIT2(0x09, 0xd8); /* or %ebx,%eax */
35646+ break;
35647+ case BPF_S_ALU_OR_K:
35648+ if (is_imm8(K))
35649+ EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
35650+ else
35651+ EMIT1_off32(0x0d, K); /* or imm32,%eax */
35652+ break;
35653+ case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
35654+ case BPF_S_ALU_XOR_X:
35655+ seen |= SEEN_XREG;
35656+ EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
35657+ break;
35658+ case BPF_S_ALU_XOR_K: /* A ^= K; */
35659+ if (K == 0)
35660+ break;
35661+ if (is_imm8(K))
35662+ EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */
35663+ else
35664+ EMIT1_off32(0x35, K); /* xor imm32,%eax */
35665+ break;
35666+ case BPF_S_ALU_LSH_X: /* A <<= X; */
35667+ seen |= SEEN_XREG;
35668+ EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
35669+ break;
35670+ case BPF_S_ALU_LSH_K:
35671+ if (K == 0)
35672+ break;
35673+ else if (K == 1)
35674+ EMIT2(0xd1, 0xe0); /* shl %eax */
35675+ else
35676+ EMIT3(0xc1, 0xe0, K);
35677+ break;
35678+ case BPF_S_ALU_RSH_X: /* A >>= X; */
35679+ seen |= SEEN_XREG;
35680+ EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
35681+ break;
35682+ case BPF_S_ALU_RSH_K: /* A >>= K; */
35683+ if (K == 0)
35684+ break;
35685+ else if (K == 1)
35686+ EMIT2(0xd1, 0xe8); /* shr %eax */
35687+ else
35688+ EMIT3(0xc1, 0xe8, K);
35689+ break;
35690+ case BPF_S_ALU_NEG:
35691+ EMIT2(0xf7, 0xd8); /* neg %eax */
35692+ break;
35693+ case BPF_S_RET_K:
35694+ if (!K) {
35695+ if (pc_ret0 == -1)
35696+ pc_ret0 = i;
35697+ CLEAR_A();
35698+ } else {
35699+ EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
35700+ }
35701+			/* fall through */
35702+ case BPF_S_RET_A:
35703+ if (seen_or_pass0) {
35704+ if (i != flen - 1) {
35705+ EMIT_JMP(cleanup_addr - addrs[i]);
35706+ break;
35707+ }
35708+ if (seen_or_pass0 & SEEN_XREG)
35709+ EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
35710+ EMIT1(0xc9); /* leaveq */
35711+ }
35712+ EMIT1(0xc3); /* ret */
35713+ break;
35714+ case BPF_S_MISC_TAX: /* X = A */
35715+ seen |= SEEN_XREG;
35716+ EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
35717+ break;
35718+ case BPF_S_MISC_TXA: /* A = X */
35719+ seen |= SEEN_XREG;
35720+ EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
35721+ break;
35722+ case BPF_S_LD_IMM: /* A = K */
35723+ if (!K)
35724+ CLEAR_A();
35725+ else
35726+ EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
35727+ break;
35728+ case BPF_S_LDX_IMM: /* X = K */
35729+ seen |= SEEN_XREG;
35730+ if (!K)
35731+ CLEAR_X();
35732+ else
35733+ EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
35734+ break;
35735+ case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
35736+ seen |= SEEN_MEM;
35737+ EMIT3(0x8b, 0x45, 0xf0 - K*4);
35738+ break;
35739+ case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
35740+ seen |= SEEN_XREG | SEEN_MEM;
35741+ EMIT3(0x8b, 0x5d, 0xf0 - K*4);
35742+ break;
35743+ case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
35744+ seen |= SEEN_MEM;
35745+ EMIT3(0x89, 0x45, 0xf0 - K*4);
35746+ break;
35747+ case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
35748+ seen |= SEEN_XREG | SEEN_MEM;
35749+ EMIT3(0x89, 0x5d, 0xf0 - K*4);
35750+ break;
35751+ case BPF_S_LD_W_LEN: /* A = skb->len; */
35752+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
35753+ if (is_imm8(offsetof(struct sk_buff, len)))
35754+ /* mov off8(%rdi),%eax */
35755+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
35756+ else {
35757+ EMIT2(0x8b, 0x87);
35758+ EMIT(offsetof(struct sk_buff, len), 4);
35759+ }
35760+ break;
35761+ case BPF_S_LDX_W_LEN: /* X = skb->len; */
35762+ seen |= SEEN_XREG;
35763+ if (is_imm8(offsetof(struct sk_buff, len)))
35764+ /* mov off8(%rdi),%ebx */
35765+ EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
35766+ else {
35767+ EMIT2(0x8b, 0x9f);
35768+ EMIT(offsetof(struct sk_buff, len), 4);
35769+ }
35770+ break;
35771+ case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
35772+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
35773+ if (is_imm8(offsetof(struct sk_buff, protocol))) {
35774+ /* movzwl off8(%rdi),%eax */
35775+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
35776+ } else {
35777+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35778+ EMIT(offsetof(struct sk_buff, protocol), 4);
35779+ }
35780+ EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
35781+ break;
35782+ case BPF_S_ANC_IFINDEX:
35783+ if (is_imm8(offsetof(struct sk_buff, dev))) {
35784+ /* movq off8(%rdi),%rax */
35785+ EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
35786+ } else {
35787+ EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
35788+ EMIT(offsetof(struct sk_buff, dev), 4);
35789+ }
35790+ EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
35791+ EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
35792+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
35793+ EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
35794+ EMIT(offsetof(struct net_device, ifindex), 4);
35795+ break;
35796+ case BPF_S_ANC_MARK:
35797+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
35798+ if (is_imm8(offsetof(struct sk_buff, mark))) {
35799+ /* mov off8(%rdi),%eax */
35800+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
35801+ } else {
35802+ EMIT2(0x8b, 0x87);
35803+ EMIT(offsetof(struct sk_buff, mark), 4);
35804+ }
35805+ break;
35806+ case BPF_S_ANC_RXHASH:
35807+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
35808+ if (is_imm8(offsetof(struct sk_buff, hash))) {
35809+ /* mov off8(%rdi),%eax */
35810+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
35811+ } else {
35812+ EMIT2(0x8b, 0x87);
35813+ EMIT(offsetof(struct sk_buff, hash), 4);
35814+ }
35815+ break;
35816+ case BPF_S_ANC_QUEUE:
35817+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
35818+ if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
35819+ /* movzwl off8(%rdi),%eax */
35820+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
35821+ } else {
35822+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35823+ EMIT(offsetof(struct sk_buff, queue_mapping), 4);
35824+ }
35825+ break;
35826+ case BPF_S_ANC_CPU:
35827+#ifdef CONFIG_SMP
35828+ EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
35829+ EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
35830+#else
35831+ CLEAR_A();
35832+#endif
35833+ break;
35834+ case BPF_S_ANC_VLAN_TAG:
35835+ case BPF_S_ANC_VLAN_TAG_PRESENT:
35836+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
35837+ if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
35838+ /* movzwl off8(%rdi),%eax */
35839+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
35840+ } else {
35841+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35842+ EMIT(offsetof(struct sk_buff, vlan_tci), 4);
35843+ }
35844+ BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
35845+ if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
35846+ EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */
35847+ } else {
35848+ EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */
35849+ EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
35850+ }
35851+ break;
35852+ case BPF_S_ANC_PKTTYPE:
35853+ {
35854+ int off = pkt_type_offset();
35855+
35856+ if (off < 0)
35857+ goto out;
35858+ if (is_imm8(off)) {
35859+ /* movzbl off8(%rdi),%eax */
35860+ EMIT4(0x0f, 0xb6, 0x47, off);
35861+ } else {
35862+				/* movzbl off32(%rdi),%eax */
35863+ EMIT3(0x0f, 0xb6, 0x87);
35864+ EMIT(off, 4);
35865+ }
35866+ EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
35867+ break;
35868+ }
35869+ case BPF_S_LD_W_ABS:
35870+ func = CHOOSE_LOAD_FUNC(K, sk_load_word);
35871+common_load: seen |= SEEN_DATAREF;
35872+ t_offset = func - (image + addrs[i]);
35873+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
35874+ EMIT1_off32(0xe8, t_offset); /* call */
35875+ break;
35876+ case BPF_S_LD_H_ABS:
35877+ func = CHOOSE_LOAD_FUNC(K, sk_load_half);
35878+ goto common_load;
35879+ case BPF_S_LD_B_ABS:
35880+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
35881+ goto common_load;
35882+ case BPF_S_LDX_B_MSH:
35883+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
35884+ seen |= SEEN_DATAREF | SEEN_XREG;
35885+ t_offset = func - (image + addrs[i]);
35886+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
35887+ EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
35888+ break;
35889+ case BPF_S_LD_W_IND:
35890+ func = sk_load_word;
35891+common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
35892+ t_offset = func - (image + addrs[i]);
35893+ if (K) {
35894+ if (is_imm8(K)) {
35895+ EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
35896+ } else {
35897+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
35898+ }
35899+ } else {
35900+ EMIT2(0x89,0xde); /* mov %ebx,%esi */
35901+ }
35902+ EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
35903+ break;
35904+ case BPF_S_LD_H_IND:
35905+ func = sk_load_half;
35906+ goto common_load_ind;
35907+ case BPF_S_LD_B_IND:
35908+ func = sk_load_byte;
35909+ goto common_load_ind;
35910+ case BPF_S_JMP_JA:
35911+ t_offset = addrs[i + K] - addrs[i];
35912+ EMIT_JMP(t_offset);
35913+ break;
35914+ COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
35915+ COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
35916+ COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
35917+ COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
35918+ COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
35919+ COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
35920+ COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
35921+ COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
35922+
35923+cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
35924+ t_offset = addrs[i + filter[i].jt] - addrs[i];
35925+
35926+ /* same targets, can avoid doing the test :) */
35927+ if (filter[i].jt == filter[i].jf) {
35928+ EMIT_JMP(t_offset);
35929+ break;
35930+ }
35931+
35932+ switch (filter[i].code) {
35933+ case BPF_S_JMP_JGT_X:
35934+ case BPF_S_JMP_JGE_X:
35935+ case BPF_S_JMP_JEQ_X:
35936+ seen |= SEEN_XREG;
35937+ EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
35938+ break;
35939+ case BPF_S_JMP_JSET_X:
35940+ seen |= SEEN_XREG;
35941+ EMIT2(0x85, 0xd8); /* test %ebx,%eax */
35942+ break;
35943+ case BPF_S_JMP_JEQ_K:
35944+ if (K == 0) {
35945+ EMIT2(0x85, 0xc0); /* test %eax,%eax */
35946+ break;
35947+ }
35948+ case BPF_S_JMP_JGT_K:
35949+ case BPF_S_JMP_JGE_K:
35950+ if (K <= 127)
35951+ EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
35952+ else
35953+ EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
35954+ break;
35955+ case BPF_S_JMP_JSET_K:
35956+ if (K <= 0xFF)
35957+ EMIT2(0xa8, K); /* test imm8,%al */
35958+ else if (!(K & 0xFFFF00FF))
35959+ EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
35960+ else if (K <= 0xFFFF) {
35961+ EMIT2(0x66, 0xa9); /* test imm16,%ax */
35962+ EMIT(K, 2);
35963+ } else {
35964+ EMIT1_off32(0xa9, K); /* test imm32,%eax */
35965+ }
35966+ break;
35967+ }
35968+ if (filter[i].jt != 0) {
35969+ if (filter[i].jf && f_offset)
35970+ t_offset += is_near(f_offset) ? 2 : 5;
35971+ EMIT_COND_JMP(t_op, t_offset);
35972+ if (filter[i].jf)
35973+ EMIT_JMP(f_offset);
35974+ break;
35975+ }
35976+ EMIT_COND_JMP(f_op, f_offset);
35977+ break;
35978+ default:
35979+ /* hmm, too complex filter, give up with jit compiler */
35980+ goto out;
35981+ }
35982+ ilen = prog - temp;
35983+ if (image) {
35984+ if (unlikely(proglen + ilen > oldproglen)) {
35985+			pr_err("bpf_jit_compile fatal error\n");
35986+ kfree(addrs);
35987+ module_free_exec(NULL, image);
35988+ return;
35989+ }
35990+ pax_open_kernel();
35991+ memcpy(image + proglen, temp, ilen);
35992+ pax_close_kernel();
35993+ }
35994+ proglen += ilen;
35995+ addrs[i] = proglen;
35996+ prog = temp;
35997+ }
35998+	/* the last BPF instruction is always a RET:
35999+	 * use its address to locate the cleanup instruction(s)
36000+	 */
36001+ cleanup_addr = proglen - 1; /* ret */
36002+ if (seen_or_pass0)
36003+ cleanup_addr -= 1; /* leaveq */
36004+ if (seen_or_pass0 & SEEN_XREG)
36005+ cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
36006+
36007 if (image) {
36008 if (proglen != oldproglen)
36009- pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
36010- proglen, oldproglen);
36011+		pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
36012 break;
36013 }
36014 if (proglen == oldproglen) {
36015@@ -918,32 +872,30 @@ void bpf_int_jit_compile(struct sk_filter *prog)
36016 }
36017
36018 if (bpf_jit_enable > 1)
36019- bpf_jit_dump(prog->len, proglen, 0, image);
36020+ bpf_jit_dump(flen, proglen, pass, image);
36021
36022 if (image) {
36023 bpf_flush_icache(header, image + proglen);
36024- set_memory_ro((unsigned long)header, header->pages);
36025- prog->bpf_func = (void *)image;
36026- prog->jited = 1;
36027+ fp->bpf_func = (void *)image;
36028 }
36029 out:
36030 kfree(addrs);
36031+ return;
36032 }
36033
36034 static void bpf_jit_free_deferred(struct work_struct *work)
36035 {
36036 struct sk_filter *fp = container_of(work, struct sk_filter, work);
36037 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
36038- struct bpf_binary_header *header = (void *)addr;
36039
36040- set_memory_rw(addr, header->pages);
36041- module_free(NULL, header);
36042+ set_memory_rw(addr, 1);
36043+ module_free_exec(NULL, (void *)addr);
36044 kfree(fp);
36045 }
36046
36047 void bpf_jit_free(struct sk_filter *fp)
36048 {
36049- if (fp->jited) {
36050+ if (fp->bpf_func != sk_run_filter) {
36051 INIT_WORK(&fp->work, bpf_jit_free_deferred);
36052 schedule_work(&fp->work);
36053 } else {
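
The CONFIG_GRKERNSEC_JIT_HARDEN branches above emit DILUTE_CONST_SEQUENCE instead of a plain mov imm32,%ecx, so the attacker-chosen filter constant K never appears verbatim in the executable JIT image. A minimal userspace sketch of one such blinding scheme (XOR-based here; the macro in the patch may combine the per-compile key differently, and the emit helpers are hypothetical stand-ins for the EMIT*() macros):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's EMIT*() byte emitters. */
    static void emit_mov_imm_ecx(uint32_t imm) { printf("mov $0x%x,%%ecx\n", imm); }
    static void emit_xor_imm_ecx(uint32_t imm) { printf("xor $0x%x,%%ecx\n", imm); }

    /* Blind the filter constant K with a per-compile random key: only
     * K ^ key is written into the executable image, and K itself is
     * recreated solely in %ecx at run time. */
    static void dilute_const_sequence(uint32_t K, uint32_t key)
    {
    	emit_mov_imm_ecx(K ^ key);
    	emit_xor_imm_ecx(key);
    }

    int main(void)
    {
    	dilute_const_sequence(0xdeadbeef, 0x12345678);
    	return 0;
    }
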
36054diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
36055index 5d04be5..2beeaa2 100644
36056--- a/arch/x86/oprofile/backtrace.c
36057+++ b/arch/x86/oprofile/backtrace.c
36058@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
36059 struct stack_frame_ia32 *fp;
36060 unsigned long bytes;
36061
36062- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
36063+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
36064 if (bytes != 0)
36065 return NULL;
36066
36067- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
36068+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
36069
36070 oprofile_add_trace(bufhead[0].return_address);
36071
36072@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
36073 struct stack_frame bufhead[2];
36074 unsigned long bytes;
36075
36076- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
36077+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
36078 if (bytes != 0)
36079 return NULL;
36080
36081@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
36082 {
36083 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
36084
36085- if (!user_mode_vm(regs)) {
36086+ if (!user_mode(regs)) {
36087 unsigned long stack = kernel_stack_pointer(regs);
36088 if (depth)
36089 dump_trace(NULL, regs, (unsigned long *)stack, 0,
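
dump_user_backtrace() and its ia32 variant above walk the user stack one saved frame pointer at a time, recording each return address. A hedged sketch of that walk in plain C: struct stack_frame mirrors the kernel's layout, record() stands in for oprofile_add_trace(), and the NMI-safe user copy done by copy_from_user_nmi() is elided.

    struct stack_frame {
    	struct stack_frame *next_frame;	/* saved caller frame pointer */
    	unsigned long return_address;	/* saved return address */
    };

    /* Walk at most 'depth' frames starting at 'fp', handing each return
     * address to record().  The safety copy from user space is elided. */
    static void walk_user_frames(struct stack_frame *fp, unsigned int depth,
    			     void (*record)(unsigned long))
    {
    	while (depth-- && fp) {
    		record(fp->return_address);
    		if (fp->next_frame <= fp)	/* frames must move up the stack */
    			break;
    		fp = fp->next_frame;
    	}
    }
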
36090diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
36091index 379e8bd..6386e09 100644
36092--- a/arch/x86/oprofile/nmi_int.c
36093+++ b/arch/x86/oprofile/nmi_int.c
36094@@ -23,6 +23,7 @@
36095 #include <asm/nmi.h>
36096 #include <asm/msr.h>
36097 #include <asm/apic.h>
36098+#include <asm/pgtable.h>
36099
36100 #include "op_counter.h"
36101 #include "op_x86_model.h"
36102@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
36103 if (ret)
36104 return ret;
36105
36106- if (!model->num_virt_counters)
36107- model->num_virt_counters = model->num_counters;
36108+ if (!model->num_virt_counters) {
36109+ pax_open_kernel();
36110+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
36111+ pax_close_kernel();
36112+ }
36113
36114 mux_init(ops);
36115
36116diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
36117index 50d86c0..7985318 100644
36118--- a/arch/x86/oprofile/op_model_amd.c
36119+++ b/arch/x86/oprofile/op_model_amd.c
36120@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
36121 num_counters = AMD64_NUM_COUNTERS;
36122 }
36123
36124- op_amd_spec.num_counters = num_counters;
36125- op_amd_spec.num_controls = num_counters;
36126- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
36127+ pax_open_kernel();
36128+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
36129+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
36130+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
36131+ pax_close_kernel();
36132
36133 return 0;
36134 }
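
Here, as in nmi_int.c above and op_model_ppro.c below, stores into the now-constified spec structures are bracketed by pax_open_kernel()/pax_close_kernel(). On x86 these briefly clear CR0.WP so the kernel can write through read-only mappings; a rough kernel-style sketch of the pattern, ignoring the extra UDEREF handling the real helpers carry:

    #include <linux/preempt.h>
    #include <asm/processor-flags.h>	/* X86_CR0_WP */
    #include <asm/special_insns.h>		/* native_read_cr0(), write_cr0() */

    static unsigned long open_kernel_writable(void)
    {
    	unsigned long cr0;

    	preempt_disable();		/* keep the window on one CPU */
    	cr0 = native_read_cr0();
    	write_cr0(cr0 & ~X86_CR0_WP);	/* allow stores through RO mappings */
    	return cr0;
    }

    static void close_kernel_writable(unsigned long cr0)
    {
    	write_cr0(cr0);			/* restore write protection */
    	preempt_enable();
    }
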
36135diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
36136index d90528e..0127e2b 100644
36137--- a/arch/x86/oprofile/op_model_ppro.c
36138+++ b/arch/x86/oprofile/op_model_ppro.c
36139@@ -19,6 +19,7 @@
36140 #include <asm/msr.h>
36141 #include <asm/apic.h>
36142 #include <asm/nmi.h>
36143+#include <asm/pgtable.h>
36144
36145 #include "op_x86_model.h"
36146 #include "op_counter.h"
36147@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
36148
36149 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
36150
36151- op_arch_perfmon_spec.num_counters = num_counters;
36152- op_arch_perfmon_spec.num_controls = num_counters;
36153+ pax_open_kernel();
36154+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
36155+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
36156+ pax_close_kernel();
36157 }
36158
36159 static int arch_perfmon_init(struct oprofile_operations *ignore)
36160diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
36161index 71e8a67..6a313bb 100644
36162--- a/arch/x86/oprofile/op_x86_model.h
36163+++ b/arch/x86/oprofile/op_x86_model.h
36164@@ -52,7 +52,7 @@ struct op_x86_model_spec {
36165 void (*switch_ctrl)(struct op_x86_model_spec const *model,
36166 struct op_msrs const * const msrs);
36167 #endif
36168-};
36169+} __do_const;
36170
36171 struct op_counter_config;
36172
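
The __do_const annotation added here (and to irq_router_handler below) comes from the constify GCC plugin: a type tagged this way has all of its instances treated as const and placed read-only, which is why legitimate writers elsewhere in this patch go through the pax_open_kernel() window. An illustrative sketch, with the attribute spelling assumed from the plugin:

    /* With the plugin active, every instance of a type tagged __do_const
     * is placed in a read-only section even without the const keyword at
     * the declaration site. */
    struct example_ops {
    	int  (*probe)(void);
    	void (*remove)(void);
    } __do_const;			/* roughly __attribute__((do_const)) */

    static struct example_ops eops = {	/* lands read-only via the plugin */
    	.probe  = NULL,
    	.remove = NULL,
    };
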
36173diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
36174index 84b9d67..260e5ff 100644
36175--- a/arch/x86/pci/intel_mid_pci.c
36176+++ b/arch/x86/pci/intel_mid_pci.c
36177@@ -245,7 +245,7 @@ int __init intel_mid_pci_init(void)
36178 pr_info("Intel MID platform detected, using MID PCI ops\n");
36179 pci_mmcfg_late_init();
36180 pcibios_enable_irq = intel_mid_pci_irq_enable;
36181- pci_root_ops = intel_mid_pci_ops;
36182+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
36183 pci_soc_mode = 1;
36184 /* Continue with standard init */
36185 return 1;
36186diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
36187index 84112f5..6334d60 100644
36188--- a/arch/x86/pci/irq.c
36189+++ b/arch/x86/pci/irq.c
36190@@ -50,7 +50,7 @@ struct irq_router {
36191 struct irq_router_handler {
36192 u16 vendor;
36193 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
36194-};
36195+} __do_const;
36196
36197 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
36198 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
36199@@ -790,7 +790,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
36200 return 0;
36201 }
36202
36203-static __initdata struct irq_router_handler pirq_routers[] = {
36204+static __initconst const struct irq_router_handler pirq_routers[] = {
36205 { PCI_VENDOR_ID_INTEL, intel_router_probe },
36206 { PCI_VENDOR_ID_AL, ali_router_probe },
36207 { PCI_VENDOR_ID_ITE, ite_router_probe },
36208@@ -817,7 +817,7 @@ static struct pci_dev *pirq_router_dev;
36209 static void __init pirq_find_router(struct irq_router *r)
36210 {
36211 struct irq_routing_table *rt = pirq_table;
36212- struct irq_router_handler *h;
36213+ const struct irq_router_handler *h;
36214
36215 #ifdef CONFIG_PCI_BIOS
36216 if (!rt->signature) {
36217@@ -1090,7 +1090,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
36218 return 0;
36219 }
36220
36221-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
36222+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
36223 {
36224 .callback = fix_broken_hp_bios_irq9,
36225 .ident = "HP Pavilion N5400 Series Laptop",
36226diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
36227index c77b24a..c979855 100644
36228--- a/arch/x86/pci/pcbios.c
36229+++ b/arch/x86/pci/pcbios.c
36230@@ -79,7 +79,7 @@ union bios32 {
36231 static struct {
36232 unsigned long address;
36233 unsigned short segment;
36234-} bios32_indirect = { 0, __KERNEL_CS };
36235+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
36236
36237 /*
36238 * Returns the entry point for the given service, NULL on error
36239@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
36240 unsigned long length; /* %ecx */
36241 unsigned long entry; /* %edx */
36242 unsigned long flags;
36243+ struct desc_struct d, *gdt;
36244
36245 local_irq_save(flags);
36246- __asm__("lcall *(%%edi); cld"
36247+
36248+ gdt = get_cpu_gdt_table(smp_processor_id());
36249+
36250+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
36251+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
36252+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
36253+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
36254+
36255+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
36256 : "=a" (return_code),
36257 "=b" (address),
36258 "=c" (length),
36259 "=d" (entry)
36260 : "0" (service),
36261 "1" (0),
36262- "D" (&bios32_indirect));
36263+ "D" (&bios32_indirect),
36264+ "r"(__PCIBIOS_DS)
36265+ : "memory");
36266+
36267+ pax_open_kernel();
36268+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
36269+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
36270+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
36271+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
36272+ pax_close_kernel();
36273+
36274 local_irq_restore(flags);
36275
36276 switch (return_code) {
36277- case 0:
36278- return address + entry;
36279- case 0x80: /* Not present */
36280- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
36281- return 0;
36282- default: /* Shouldn't happen */
36283- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
36284- service, return_code);
36285+ case 0: {
36286+ int cpu;
36287+ unsigned char flags;
36288+
36289+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
36290+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
36291+ printk(KERN_WARNING "bios32_service: not valid\n");
36292 return 0;
36293+ }
36294+ address = address + PAGE_OFFSET;
36295+ length += 16UL; /* some BIOSs underreport this... */
36296+ flags = 4;
36297+ if (length >= 64*1024*1024) {
36298+ length >>= PAGE_SHIFT;
36299+ flags |= 8;
36300+ }
36301+
36302+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
36303+ gdt = get_cpu_gdt_table(cpu);
36304+ pack_descriptor(&d, address, length, 0x9b, flags);
36305+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
36306+ pack_descriptor(&d, address, length, 0x93, flags);
36307+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
36308+ }
36309+ return entry;
36310+ }
36311+ case 0x80: /* Not present */
36312+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
36313+ return 0;
36314+ default: /* Shouldn't happen */
36315+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
36316+ service, return_code);
36317+ return 0;
36318 }
36319 }
36320
36321 static struct {
36322 unsigned long address;
36323 unsigned short segment;
36324-} pci_indirect = { 0, __KERNEL_CS };
36325+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
36326
36327-static int pci_bios_present;
36328+static int pci_bios_present __read_only;
36329
36330 static int check_pcibios(void)
36331 {
36332@@ -131,11 +174,13 @@ static int check_pcibios(void)
36333 unsigned long flags, pcibios_entry;
36334
36335 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
36336- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
36337+ pci_indirect.address = pcibios_entry;
36338
36339 local_irq_save(flags);
36340- __asm__(
36341- "lcall *(%%edi); cld\n\t"
36342+ __asm__("movw %w6, %%ds\n\t"
36343+ "lcall *%%ss:(%%edi); cld\n\t"
36344+ "push %%ss\n\t"
36345+ "pop %%ds\n\t"
36346 "jc 1f\n\t"
36347 "xor %%ah, %%ah\n"
36348 "1:"
36349@@ -144,7 +189,8 @@ static int check_pcibios(void)
36350 "=b" (ebx),
36351 "=c" (ecx)
36352 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
36353- "D" (&pci_indirect)
36354+ "D" (&pci_indirect),
36355+ "r" (__PCIBIOS_DS)
36356 : "memory");
36357 local_irq_restore(flags);
36358
36359@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36360
36361 switch (len) {
36362 case 1:
36363- __asm__("lcall *(%%esi); cld\n\t"
36364+ __asm__("movw %w6, %%ds\n\t"
36365+ "lcall *%%ss:(%%esi); cld\n\t"
36366+ "push %%ss\n\t"
36367+ "pop %%ds\n\t"
36368 "jc 1f\n\t"
36369 "xor %%ah, %%ah\n"
36370 "1:"
36371@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36372 : "1" (PCIBIOS_READ_CONFIG_BYTE),
36373 "b" (bx),
36374 "D" ((long)reg),
36375- "S" (&pci_indirect));
36376+ "S" (&pci_indirect),
36377+ "r" (__PCIBIOS_DS));
36378 /*
36379 * Zero-extend the result beyond 8 bits, do not trust the
36380 * BIOS having done it:
36381@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36382 *value &= 0xff;
36383 break;
36384 case 2:
36385- __asm__("lcall *(%%esi); cld\n\t"
36386+ __asm__("movw %w6, %%ds\n\t"
36387+ "lcall *%%ss:(%%esi); cld\n\t"
36388+ "push %%ss\n\t"
36389+ "pop %%ds\n\t"
36390 "jc 1f\n\t"
36391 "xor %%ah, %%ah\n"
36392 "1:"
36393@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36394 : "1" (PCIBIOS_READ_CONFIG_WORD),
36395 "b" (bx),
36396 "D" ((long)reg),
36397- "S" (&pci_indirect));
36398+ "S" (&pci_indirect),
36399+ "r" (__PCIBIOS_DS));
36400 /*
36401 * Zero-extend the result beyond 16 bits, do not trust the
36402 * BIOS having done it:
36403@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36404 *value &= 0xffff;
36405 break;
36406 case 4:
36407- __asm__("lcall *(%%esi); cld\n\t"
36408+ __asm__("movw %w6, %%ds\n\t"
36409+ "lcall *%%ss:(%%esi); cld\n\t"
36410+ "push %%ss\n\t"
36411+ "pop %%ds\n\t"
36412 "jc 1f\n\t"
36413 "xor %%ah, %%ah\n"
36414 "1:"
36415@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36416 : "1" (PCIBIOS_READ_CONFIG_DWORD),
36417 "b" (bx),
36418 "D" ((long)reg),
36419- "S" (&pci_indirect));
36420+ "S" (&pci_indirect),
36421+ "r" (__PCIBIOS_DS));
36422 break;
36423 }
36424
36425@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36426
36427 switch (len) {
36428 case 1:
36429- __asm__("lcall *(%%esi); cld\n\t"
36430+ __asm__("movw %w6, %%ds\n\t"
36431+ "lcall *%%ss:(%%esi); cld\n\t"
36432+ "push %%ss\n\t"
36433+ "pop %%ds\n\t"
36434 "jc 1f\n\t"
36435 "xor %%ah, %%ah\n"
36436 "1:"
36437@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36438 "c" (value),
36439 "b" (bx),
36440 "D" ((long)reg),
36441- "S" (&pci_indirect));
36442+ "S" (&pci_indirect),
36443+ "r" (__PCIBIOS_DS));
36444 break;
36445 case 2:
36446- __asm__("lcall *(%%esi); cld\n\t"
36447+ __asm__("movw %w6, %%ds\n\t"
36448+ "lcall *%%ss:(%%esi); cld\n\t"
36449+ "push %%ss\n\t"
36450+ "pop %%ds\n\t"
36451 "jc 1f\n\t"
36452 "xor %%ah, %%ah\n"
36453 "1:"
36454@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36455 "c" (value),
36456 "b" (bx),
36457 "D" ((long)reg),
36458- "S" (&pci_indirect));
36459+ "S" (&pci_indirect),
36460+ "r" (__PCIBIOS_DS));
36461 break;
36462 case 4:
36463- __asm__("lcall *(%%esi); cld\n\t"
36464+ __asm__("movw %w6, %%ds\n\t"
36465+ "lcall *%%ss:(%%esi); cld\n\t"
36466+ "push %%ss\n\t"
36467+ "pop %%ds\n\t"
36468 "jc 1f\n\t"
36469 "xor %%ah, %%ah\n"
36470 "1:"
36471@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36472 "c" (value),
36473 "b" (bx),
36474 "D" ((long)reg),
36475- "S" (&pci_indirect));
36476+ "S" (&pci_indirect),
36477+ "r" (__PCIBIOS_DS));
36478 break;
36479 }
36480
36481@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36482
36483 DBG("PCI: Fetching IRQ routing table... ");
36484 __asm__("push %%es\n\t"
36485+ "movw %w8, %%ds\n\t"
36486 "push %%ds\n\t"
36487 "pop %%es\n\t"
36488- "lcall *(%%esi); cld\n\t"
36489+ "lcall *%%ss:(%%esi); cld\n\t"
36490 "pop %%es\n\t"
36491+ "push %%ss\n\t"
36492+ "pop %%ds\n"
36493 "jc 1f\n\t"
36494 "xor %%ah, %%ah\n"
36495 "1:"
36496@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36497 "1" (0),
36498 "D" ((long) &opt),
36499 "S" (&pci_indirect),
36500- "m" (opt)
36501+ "m" (opt),
36502+ "r" (__PCIBIOS_DS)
36503 : "memory");
36504 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
36505 if (ret & 0xff00)
36506@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36507 {
36508 int ret;
36509
36510- __asm__("lcall *(%%esi); cld\n\t"
36511+ __asm__("movw %w5, %%ds\n\t"
36512+ "lcall *%%ss:(%%esi); cld\n\t"
36513+ "push %%ss\n\t"
36514+ "pop %%ds\n"
36515 "jc 1f\n\t"
36516 "xor %%ah, %%ah\n"
36517 "1:"
36518@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36519 : "0" (PCIBIOS_SET_PCI_HW_INT),
36520 "b" ((dev->bus->number << 8) | dev->devfn),
36521 "c" ((irq << 8) | (pin + 10)),
36522- "S" (&pci_indirect));
36523+ "S" (&pci_indirect),
36524+ "r" (__PCIBIOS_DS));
36525 return !(ret & 0xff00);
36526 }
36527 EXPORT_SYMBOL(pcibios_set_irq_routing);
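
Every BIOS entry point in this file is reached with lcall *(%esi) through a six-byte {offset, segment} memory operand; the hunks above only swap the selector from the flat __KERNEL_CS to the dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors, whose base and limit bios32_service() clamps to the BIOS32 region. The operand layout the far call consumes is, in effect:

    #include <stdint.h>

    /* m16:32 operand consumed by "lcall *mem": a 32-bit offset followed
     * by a 16-bit code-segment selector.  bios32_indirect and
     * pci_indirect above have exactly this shape. */
    struct far_ptr32 {
    	uint32_t offset;	/* entry point within the segment */
    	uint16_t segment;	/* __PCIBIOS_CS instead of the flat __KERNEL_CS */
    } __attribute__((packed));

Confining the BIOS to its own small segments means a misbehaving BIOS call can no longer address arbitrary kernel memory through the flat segments.
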
36528diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
36529index 9ee3491..872192f 100644
36530--- a/arch/x86/platform/efi/efi_32.c
36531+++ b/arch/x86/platform/efi/efi_32.c
36532@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
36533 {
36534 struct desc_ptr gdt_descr;
36535
36536+#ifdef CONFIG_PAX_KERNEXEC
36537+ struct desc_struct d;
36538+#endif
36539+
36540 local_irq_save(efi_rt_eflags);
36541
36542 load_cr3(initial_page_table);
36543 __flush_tlb_all();
36544
36545+#ifdef CONFIG_PAX_KERNEXEC
36546+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
36547+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36548+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
36549+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36550+#endif
36551+
36552 gdt_descr.address = __pa(get_cpu_gdt_table(0));
36553 gdt_descr.size = GDT_SIZE - 1;
36554 load_gdt(&gdt_descr);
36555@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
36556 {
36557 struct desc_ptr gdt_descr;
36558
36559+#ifdef CONFIG_PAX_KERNEXEC
36560+ struct desc_struct d;
36561+
36562+ memset(&d, 0, sizeof d);
36563+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36564+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36565+#endif
36566+
36567 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
36568 gdt_descr.size = GDT_SIZE - 1;
36569 load_gdt(&gdt_descr);
36570
36571+#ifdef CONFIG_PAX_PER_CPU_PGD
36572+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36573+#else
36574 load_cr3(swapper_pg_dir);
36575+#endif
36576+
36577 __flush_tlb_all();
36578
36579 local_irq_restore(efi_rt_eflags);
36580diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
36581index 290d397..3906bcd 100644
36582--- a/arch/x86/platform/efi/efi_64.c
36583+++ b/arch/x86/platform/efi/efi_64.c
36584@@ -99,6 +99,11 @@ void __init efi_call_phys_prelog(void)
36585 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
36586 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
36587 }
36588+
36589+#ifdef CONFIG_PAX_PER_CPU_PGD
36590+ load_cr3(swapper_pg_dir);
36591+#endif
36592+
36593 __flush_tlb_all();
36594 }
36595
36596@@ -116,6 +121,11 @@ void __init efi_call_phys_epilog(void)
36597 for (pgd = 0; pgd < n_pgds; pgd++)
36598 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
36599 kfree(save_pgd);
36600+
36601+#ifdef CONFIG_PAX_PER_CPU_PGD
36602+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36603+#endif
36604+
36605 __flush_tlb_all();
36606 local_irq_restore(efi_flags);
36607 early_code_mapping_set_exec(0);
36608diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
36609index fbe66e6..eae5e38 100644
36610--- a/arch/x86/platform/efi/efi_stub_32.S
36611+++ b/arch/x86/platform/efi/efi_stub_32.S
36612@@ -6,7 +6,9 @@
36613 */
36614
36615 #include <linux/linkage.h>
36616+#include <linux/init.h>
36617 #include <asm/page_types.h>
36618+#include <asm/segment.h>
36619
36620 /*
36621 * efi_call_phys(void *, ...) is a function with variable parameters.
36622@@ -20,7 +22,7 @@
36623 * service functions will comply with gcc calling convention, too.
36624 */
36625
36626-.text
36627+__INIT
36628 ENTRY(efi_call_phys)
36629 /*
36630 * 0. The function can only be called in Linux kernel. So CS has been
36631@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
36632 * The mapping of lower virtual memory has been created in prelog and
36633 * epilog.
36634 */
36635- movl $1f, %edx
36636- subl $__PAGE_OFFSET, %edx
36637- jmp *%edx
36638+#ifdef CONFIG_PAX_KERNEXEC
36639+ movl $(__KERNEXEC_EFI_DS), %edx
36640+ mov %edx, %ds
36641+ mov %edx, %es
36642+ mov %edx, %ss
36643+ addl $2f,(1f)
36644+ ljmp *(1f)
36645+
36646+__INITDATA
36647+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
36648+.previous
36649+
36650+2:
36651+ subl $2b,(1b)
36652+#else
36653+ jmp 1f-__PAGE_OFFSET
36654 1:
36655+#endif
36656
36657 /*
36658 * 2. Now on the top of stack is the return
36659@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
36660 * parameter 2, ..., param n. To make things easy, we save the return
36661 * address of efi_call_phys in a global variable.
36662 */
36663- popl %edx
36664- movl %edx, saved_return_addr
36665- /* get the function pointer into ECX*/
36666- popl %ecx
36667- movl %ecx, efi_rt_function_ptr
36668- movl $2f, %edx
36669- subl $__PAGE_OFFSET, %edx
36670- pushl %edx
36671+ popl (saved_return_addr)
36672+ popl (efi_rt_function_ptr)
36673
36674 /*
36675 * 3. Clear PG bit in %CR0.
36676@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
36677 /*
36678 * 5. Call the physical function.
36679 */
36680- jmp *%ecx
36681+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
36682
36683-2:
36684 /*
36685 * 6. After EFI runtime service returns, control will return to
36686 * following instruction. We'd better readjust stack pointer first.
36687@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
36688 movl %cr0, %edx
36689 orl $0x80000000, %edx
36690 movl %edx, %cr0
36691- jmp 1f
36692-1:
36693+
36694 /*
36695 * 8. Now restore the virtual mode from flat mode by
36696 * adding EIP with PAGE_OFFSET.
36697 */
36698- movl $1f, %edx
36699- jmp *%edx
36700+#ifdef CONFIG_PAX_KERNEXEC
36701+ movl $(__KERNEL_DS), %edx
36702+ mov %edx, %ds
36703+ mov %edx, %es
36704+ mov %edx, %ss
36705+ ljmp $(__KERNEL_CS),$1f
36706+#else
36707+ jmp 1f+__PAGE_OFFSET
36708+#endif
36709 1:
36710
36711 /*
36712 * 9. Balance the stack. And because EAX contain the return value,
36713 * we'd better not clobber it.
36714 */
36715- leal efi_rt_function_ptr, %edx
36716- movl (%edx), %ecx
36717- pushl %ecx
36718+ pushl (efi_rt_function_ptr)
36719
36720 /*
36721- * 10. Push the saved return address onto the stack and return.
36722+ * 10. Return to the saved return address.
36723 */
36724- leal saved_return_addr, %edx
36725- movl (%edx), %ecx
36726- pushl %ecx
36727- ret
36728+ jmpl *(saved_return_addr)
36729 ENDPROC(efi_call_phys)
36730 .previous
36731
36732-.data
36733+__INITDATA
36734 saved_return_addr:
36735 .long 0
36736 efi_rt_function_ptr:
36737diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
36738index 5fcda72..b9d1d65 100644
36739--- a/arch/x86/platform/efi/efi_stub_64.S
36740+++ b/arch/x86/platform/efi/efi_stub_64.S
36741@@ -11,6 +11,7 @@
36742 #include <asm/msr.h>
36743 #include <asm/processor-flags.h>
36744 #include <asm/page_types.h>
36745+#include <asm/alternative-asm.h>
36746
36747 #define SAVE_XMM \
36748 mov %rsp, %rax; \
36749@@ -88,6 +89,7 @@ ENTRY(efi_call)
36750 RESTORE_PGT
36751 addq $48, %rsp
36752 RESTORE_XMM
36753+ pax_force_retaddr 0, 1
36754 ret
36755 ENDPROC(efi_call)
36756
36757diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
36758index 1bbedc4..eb795b5 100644
36759--- a/arch/x86/platform/intel-mid/intel-mid.c
36760+++ b/arch/x86/platform/intel-mid/intel-mid.c
36761@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
36762 {
36763 };
36764
36765-static void intel_mid_reboot(void)
36766+static void __noreturn intel_mid_reboot(void)
36767 {
36768 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
36769+ BUG();
36770 }
36771
36772 static unsigned long __init intel_mid_calibrate_tsc(void)
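
Marking intel_mid_reboot() __noreturn (the Xen reboot handlers later in this patch get the same treatment) requires that the function provably never fall through, hence the added BUG(). A generic sketch of the pattern; issue_platform_reset() is a hypothetical hook:

    /* BUG() keeps the noreturn contract honest: if the reset request is
     * ignored, we stop here rather than returning into callers that were
     * compiled assuming this function never returns. */
    static void __noreturn example_reboot(void)
    {
    	issue_platform_reset();	/* hypothetical: ask firmware to reset */
    	BUG();
    }
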
36773diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
36774index d6ee929..3637cb5 100644
36775--- a/arch/x86/platform/olpc/olpc_dt.c
36776+++ b/arch/x86/platform/olpc/olpc_dt.c
36777@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
36778 return res;
36779 }
36780
36781-static struct of_pdt_ops prom_olpc_ops __initdata = {
36782+static struct of_pdt_ops prom_olpc_ops __initconst = {
36783 .nextprop = olpc_dt_nextprop,
36784 .getproplen = olpc_dt_getproplen,
36785 .getproperty = olpc_dt_getproperty,
36786diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
36787index 424f4c9..f2a2988 100644
36788--- a/arch/x86/power/cpu.c
36789+++ b/arch/x86/power/cpu.c
36790@@ -137,11 +137,8 @@ static void do_fpu_end(void)
36791 static void fix_processor_context(void)
36792 {
36793 int cpu = smp_processor_id();
36794- struct tss_struct *t = &per_cpu(init_tss, cpu);
36795-#ifdef CONFIG_X86_64
36796- struct desc_struct *desc = get_cpu_gdt_table(cpu);
36797- tss_desc tss;
36798-#endif
36799+ struct tss_struct *t = init_tss + cpu;
36800+
36801 set_tss_desc(cpu, t); /*
36802 * This just modifies memory; should not be
36803 * necessary. But... This is necessary, because
36804@@ -150,10 +147,6 @@ static void fix_processor_context(void)
36805 */
36806
36807 #ifdef CONFIG_X86_64
36808- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
36809- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
36810- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
36811-
36812 syscall_init(); /* This sets MSR_*STAR and related */
36813 #endif
36814 load_TR_desc(); /* This does ltr */
36815diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
36816index bad628a..a102610 100644
36817--- a/arch/x86/realmode/init.c
36818+++ b/arch/x86/realmode/init.c
36819@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
36820 __va(real_mode_header->trampoline_header);
36821
36822 #ifdef CONFIG_X86_32
36823- trampoline_header->start = __pa_symbol(startup_32_smp);
36824+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
36825+
36826+#ifdef CONFIG_PAX_KERNEXEC
36827+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
36828+#endif
36829+
36830+ trampoline_header->boot_cs = __BOOT_CS;
36831 trampoline_header->gdt_limit = __BOOT_DS + 7;
36832 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
36833 #else
36834@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
36835 *trampoline_cr4_features = read_cr4();
36836
36837 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
36838- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
36839+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
36840 trampoline_pgd[511] = init_level4_pgt[511].pgd;
36841 #endif
36842 }
36843diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
36844index 7c0d7be..d24dc88 100644
36845--- a/arch/x86/realmode/rm/Makefile
36846+++ b/arch/x86/realmode/rm/Makefile
36847@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
36848
36849 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
36850 -I$(srctree)/arch/x86/boot
36851+ifdef CONSTIFY_PLUGIN
36852+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
36853+endif
36854 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
36855 GCOV_PROFILE := n
36856diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
36857index a28221d..93c40f1 100644
36858--- a/arch/x86/realmode/rm/header.S
36859+++ b/arch/x86/realmode/rm/header.S
36860@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
36861 #endif
36862 /* APM/BIOS reboot */
36863 .long pa_machine_real_restart_asm
36864-#ifdef CONFIG_X86_64
36865+#ifdef CONFIG_X86_32
36866+ .long __KERNEL_CS
36867+#else
36868 .long __KERNEL32_CS
36869 #endif
36870 END(real_mode_header)
36871diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
36872index 48ddd76..c26749f 100644
36873--- a/arch/x86/realmode/rm/trampoline_32.S
36874+++ b/arch/x86/realmode/rm/trampoline_32.S
36875@@ -24,6 +24,12 @@
36876 #include <asm/page_types.h>
36877 #include "realmode.h"
36878
36879+#ifdef CONFIG_PAX_KERNEXEC
36880+#define ta(X) (X)
36881+#else
36882+#define ta(X) (pa_ ## X)
36883+#endif
36884+
36885 .text
36886 .code16
36887
36888@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
36889
36890 cli # We should be safe anyway
36891
36892- movl tr_start, %eax # where we need to go
36893-
36894 movl $0xA5A5A5A5, trampoline_status
36895 	# write marker so the master knows we're running
36896
36897@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
36898 movw $1, %dx # protected mode (PE) bit
36899 lmsw %dx # into protected mode
36900
36901- ljmpl $__BOOT_CS, $pa_startup_32
36902+ ljmpl *(trampoline_header)
36903
36904 .section ".text32","ax"
36905 .code32
36906@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
36907 .balign 8
36908 GLOBAL(trampoline_header)
36909 tr_start: .space 4
36910- tr_gdt_pad: .space 2
36911+ tr_boot_cs: .space 2
36912 tr_gdt: .space 6
36913 END(trampoline_header)
36914
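
With tr_gdt_pad repurposed as tr_boot_cs, trampoline_header now begins with a 4-byte entry offset followed by a 2-byte code selector: exactly the m16:32 operand that the new ljmpl *(trampoline_header) consumes, which lets setup_real_mode() pick the boot CS at run time. Viewed from C (field names match the .space directives above):

    #include <stdint.h>

    struct trampoline_header32 {
    	uint32_t start;		/* tr_start: protected-mode entry point */
    	uint16_t boot_cs;	/* tr_boot_cs: selector set by setup_real_mode() */
    	uint16_t gdt_limit;	/* tr_gdt: boot GDT limit... */
    	uint32_t gdt_base;	/* ...and linear base */
    } __attribute__((packed));
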
36915diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
36916index dac7b20..72dbaca 100644
36917--- a/arch/x86/realmode/rm/trampoline_64.S
36918+++ b/arch/x86/realmode/rm/trampoline_64.S
36919@@ -93,6 +93,7 @@ ENTRY(startup_32)
36920 movl %edx, %gs
36921
36922 movl pa_tr_cr4, %eax
36923+ andl $~X86_CR4_PCIDE, %eax
36924 movl %eax, %cr4 # Enable PAE mode
36925
36926 # Setup trampoline 4 level pagetables
36927@@ -106,7 +107,7 @@ ENTRY(startup_32)
36928 wrmsr
36929
36930 # Enable paging and in turn activate Long Mode
36931- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
36932+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
36933 movl %eax, %cr0
36934
36935 /*
36936diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
36937index 9e7e147..25a4158 100644
36938--- a/arch/x86/realmode/rm/wakeup_asm.S
36939+++ b/arch/x86/realmode/rm/wakeup_asm.S
36940@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
36941 lgdtl pmode_gdt
36942
36943 /* This really couldn't... */
36944- movl pmode_entry, %eax
36945 movl pmode_cr0, %ecx
36946 movl %ecx, %cr0
36947- ljmpl $__KERNEL_CS, $pa_startup_32
36948- /* -> jmp *%eax in trampoline_32.S */
36949+
36950+ ljmpl *pmode_entry
36951 #else
36952 jmp trampoline_start
36953 #endif
36954diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
36955index 604a37e..e49702a 100644
36956--- a/arch/x86/tools/Makefile
36957+++ b/arch/x86/tools/Makefile
36958@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
36959
36960 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
36961
36962-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
36963+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
36964 hostprogs-y += relocs
36965 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
36966 PHONY += relocs
36967diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
36968index bbb1d22..e505211 100644
36969--- a/arch/x86/tools/relocs.c
36970+++ b/arch/x86/tools/relocs.c
36971@@ -1,5 +1,7 @@
36972 /* This is included from relocs_32/64.c */
36973
36974+#include "../../../include/generated/autoconf.h"
36975+
36976 #define ElfW(type) _ElfW(ELF_BITS, type)
36977 #define _ElfW(bits, type) __ElfW(bits, type)
36978 #define __ElfW(bits, type) Elf##bits##_##type
36979@@ -11,6 +13,7 @@
36980 #define Elf_Sym ElfW(Sym)
36981
36982 static Elf_Ehdr ehdr;
36983+static Elf_Phdr *phdr;
36984
36985 struct relocs {
36986 uint32_t *offset;
36987@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
36988 }
36989 }
36990
36991+static void read_phdrs(FILE *fp)
36992+{
36993+ unsigned int i;
36994+
36995+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
36996+ if (!phdr) {
36997+ die("Unable to allocate %d program headers\n",
36998+ ehdr.e_phnum);
36999+ }
37000+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
37001+ die("Seek to %d failed: %s\n",
37002+ ehdr.e_phoff, strerror(errno));
37003+ }
37004+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
37005+ die("Cannot read ELF program headers: %s\n",
37006+ strerror(errno));
37007+ }
37008+	for (i = 0; i < ehdr.e_phnum; i++) {
37009+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
37010+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
37011+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
37012+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
37013+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
37014+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
37015+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
37016+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
37017+ }
37018+
37019+}
37020+
37021 static void read_shdrs(FILE *fp)
37022 {
37023- int i;
37024+ unsigned int i;
37025 Elf_Shdr shdr;
37026
37027 secs = calloc(ehdr.e_shnum, sizeof(struct section));
37028@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
37029
37030 static void read_strtabs(FILE *fp)
37031 {
37032- int i;
37033+ unsigned int i;
37034 for (i = 0; i < ehdr.e_shnum; i++) {
37035 struct section *sec = &secs[i];
37036 if (sec->shdr.sh_type != SHT_STRTAB) {
37037@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
37038
37039 static void read_symtabs(FILE *fp)
37040 {
37041- int i,j;
37042+ unsigned int i,j;
37043 for (i = 0; i < ehdr.e_shnum; i++) {
37044 struct section *sec = &secs[i];
37045 if (sec->shdr.sh_type != SHT_SYMTAB) {
37046@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
37047 }
37048
37049
37050-static void read_relocs(FILE *fp)
37051+static void read_relocs(FILE *fp, int use_real_mode)
37052 {
37053- int i,j;
37054+ unsigned int i,j;
37055+ uint32_t base;
37056+
37057 for (i = 0; i < ehdr.e_shnum; i++) {
37058 struct section *sec = &secs[i];
37059 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37060@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
37061 die("Cannot read symbol table: %s\n",
37062 strerror(errno));
37063 }
37064+ base = 0;
37065+
37066+#ifdef CONFIG_X86_32
37067+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
37068+ if (phdr[j].p_type != PT_LOAD )
37069+ continue;
37070+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
37071+ continue;
37072+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
37073+ break;
37074+ }
37075+#endif
37076+
37077 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
37078 Elf_Rel *rel = &sec->reltab[j];
37079- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
37080+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
37081 rel->r_info = elf_xword_to_cpu(rel->r_info);
37082 #if (SHT_REL_TYPE == SHT_RELA)
37083 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
37084@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
37085
37086 static void print_absolute_symbols(void)
37087 {
37088- int i;
37089+ unsigned int i;
37090 const char *format;
37091
37092 if (ELF_BITS == 64)
37093@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
37094 for (i = 0; i < ehdr.e_shnum; i++) {
37095 struct section *sec = &secs[i];
37096 char *sym_strtab;
37097- int j;
37098+ unsigned int j;
37099
37100 if (sec->shdr.sh_type != SHT_SYMTAB) {
37101 continue;
37102@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
37103
37104 static void print_absolute_relocs(void)
37105 {
37106- int i, printed = 0;
37107+ unsigned int i, printed = 0;
37108 const char *format;
37109
37110 if (ELF_BITS == 64)
37111@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
37112 struct section *sec_applies, *sec_symtab;
37113 char *sym_strtab;
37114 Elf_Sym *sh_symtab;
37115- int j;
37116+ unsigned int j;
37117 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37118 continue;
37119 }
37120@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
37121 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
37122 Elf_Sym *sym, const char *symname))
37123 {
37124- int i;
37125+ unsigned int i;
37126 /* Walk through the relocations */
37127 for (i = 0; i < ehdr.e_shnum; i++) {
37128 char *sym_strtab;
37129 Elf_Sym *sh_symtab;
37130 struct section *sec_applies, *sec_symtab;
37131- int j;
37132+ unsigned int j;
37133 struct section *sec = &secs[i];
37134
37135 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37136@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
37137 {
37138 unsigned r_type = ELF32_R_TYPE(rel->r_info);
37139 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
37140+ char *sym_strtab = sec->link->link->strtab;
37141+
37142+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
37143+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
37144+ return 0;
37145+
37146+#ifdef CONFIG_PAX_KERNEXEC
37147+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
37148+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
37149+ return 0;
37150+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
37151+ return 0;
37152+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
37153+ return 0;
37154+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
37155+ return 0;
37156+#endif
37157
37158 switch (r_type) {
37159 case R_386_NONE:
37160@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
37161
37162 static void emit_relocs(int as_text, int use_real_mode)
37163 {
37164- int i;
37165+ unsigned int i;
37166 int (*write_reloc)(uint32_t, FILE *) = write32;
37167 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
37168 const char *symname);
37169@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
37170 {
37171 regex_init(use_real_mode);
37172 read_ehdr(fp);
37173+ read_phdrs(fp);
37174 read_shdrs(fp);
37175 read_strtabs(fp);
37176 read_symtabs(fp);
37177- read_relocs(fp);
37178+ read_relocs(fp, use_real_mode);
37179 if (ELF_BITS == 64)
37180 percpu_init();
37181 if (show_absolute_syms) {
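
read_relocs() now rebases each 32-bit r_offset by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr, taken from the PT_LOAD segment containing the relocated section. For a conventional i386 image (PAGE_OFFSET 0xC0000000, a text segment with p_vaddr 0xC1000000 and p_paddr 0x01000000) the correction is zero, so only sections that KERNEXEC links at non-default addresses actually move. A self-contained sketch of the lookup:

    #include <elf.h>
    #include <stdint.h>

    /* Find the PT_LOAD segment containing a section (by file offset) and
     * return the base that turns its link-time r_offset values into
     * PAGE_OFFSET-relative addresses: the arithmetic read_relocs() adds.
     * page_offset stands in for CONFIG_PAGE_OFFSET. */
    static uint32_t reloc_base(uint32_t sh_offset, const Elf32_Phdr *phdr,
    			   unsigned int phnum, uint32_t page_offset)
    {
    	unsigned int j;

    	for (j = 0; j < phnum; j++) {
    		if (phdr[j].p_type != PT_LOAD)
    			continue;
    		if (sh_offset < phdr[j].p_offset ||
    		    sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
    			continue;
    		return page_offset + phdr[j].p_paddr - phdr[j].p_vaddr;
    	}
    	return 0;	/* not inside any PT_LOAD segment */
    }
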
37182diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
37183index f40281e..92728c9 100644
37184--- a/arch/x86/um/mem_32.c
37185+++ b/arch/x86/um/mem_32.c
37186@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
37187 gate_vma.vm_start = FIXADDR_USER_START;
37188 gate_vma.vm_end = FIXADDR_USER_END;
37189 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
37190- gate_vma.vm_page_prot = __P101;
37191+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
37192
37193 return 0;
37194 }
37195diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
37196index 80ffa5b..a33bd15 100644
37197--- a/arch/x86/um/tls_32.c
37198+++ b/arch/x86/um/tls_32.c
37199@@ -260,7 +260,7 @@ out:
37200 if (unlikely(task == current &&
37201 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
37202 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
37203- "without flushed TLS.", current->pid);
37204+ "without flushed TLS.", task_pid_nr(current));
37205 }
37206
37207 return 0;
37208diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
37209index 61b04fe..3134230 100644
37210--- a/arch/x86/vdso/Makefile
37211+++ b/arch/x86/vdso/Makefile
37212@@ -170,7 +170,7 @@ quiet_cmd_vdso = VDSO $@
37213 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
37214 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
37215
37216-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
37217+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
37218 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
37219 GCOV_PROFILE := n
37220
37221diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
37222index e4f7781..ab5ab26 100644
37223--- a/arch/x86/vdso/vdso32-setup.c
37224+++ b/arch/x86/vdso/vdso32-setup.c
37225@@ -14,6 +14,7 @@
37226 #include <asm/cpufeature.h>
37227 #include <asm/processor.h>
37228 #include <asm/vdso.h>
37229+#include <asm/mman.h>
37230
37231 #ifdef CONFIG_COMPAT_VDSO
37232 #define VDSO_DEFAULT 0
37233diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
37234index 5a5176d..e570acd 100644
37235--- a/arch/x86/vdso/vma.c
37236+++ b/arch/x86/vdso/vma.c
37237@@ -16,10 +16,9 @@
37238 #include <asm/vdso.h>
37239 #include <asm/page.h>
37240 #include <asm/hpet.h>
37241+#include <asm/mman.h>
37242
37243 #if defined(CONFIG_X86_64)
37244-unsigned int __read_mostly vdso64_enabled = 1;
37245-
37246 extern unsigned short vdso_sync_cpuid;
37247 #endif
37248
37249@@ -101,6 +100,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37250 .pages = no_pages,
37251 };
37252
37253+#ifdef CONFIG_PAX_RANDMMAP
37254+ if (mm->pax_flags & MF_PAX_RANDMMAP)
37255+ calculate_addr = false;
37256+#endif
37257+
37258 if (calculate_addr) {
37259 addr = vdso_addr(current->mm->start_stack,
37260 image->sym_end_mapping);
37261@@ -110,13 +114,13 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37262
37263 down_write(&mm->mmap_sem);
37264
37265- addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
37266+ addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, MAP_EXECUTABLE);
37267 if (IS_ERR_VALUE(addr)) {
37268 ret = addr;
37269 goto up_fail;
37270 }
37271
37272- current->mm->context.vdso = (void __user *)addr;
37273+ mm->context.vdso = addr;
37274
37275 /*
37276 * MAYWRITE to allow gdb to COW and set breakpoints
37277@@ -161,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37278 hpet_address >> PAGE_SHIFT,
37279 PAGE_SIZE,
37280 pgprot_noncached(PAGE_READONLY));
37281-
37282- if (ret)
37283- goto up_fail;
37284 }
37285 #endif
37286
37287 up_fail:
37288 if (ret)
37289- current->mm->context.vdso = NULL;
37290+ current->mm->context.vdso = 0;
37291
37292 up_write(&mm->mmap_sem);
37293 return ret;
37294@@ -189,8 +190,8 @@ static int load_vdso32(void)
37295
37296 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
37297 current_thread_info()->sysenter_return =
37298- current->mm->context.vdso +
37299- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
37300+ (void __force_user *)(current->mm->context.vdso +
37301+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
37302
37303 return 0;
37304 }
37305@@ -199,9 +200,6 @@ static int load_vdso32(void)
37306 #ifdef CONFIG_X86_64
37307 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37308 {
37309- if (!vdso64_enabled)
37310- return 0;
37311-
37312 return map_vdso(&vdso_image_64, true);
37313 }
37314
37315@@ -210,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
37316 int uses_interp)
37317 {
37318 #ifdef CONFIG_X86_X32_ABI
37319- if (test_thread_flag(TIF_X32)) {
37320- if (!vdso64_enabled)
37321- return 0;
37322-
37323+ if (test_thread_flag(TIF_X32))
37324 return map_vdso(&vdso_image_x32, true);
37325- }
37326 #endif
37327
37328 return load_vdso32();
37329@@ -227,12 +221,3 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37330 return load_vdso32();
37331 }
37332 #endif
37333-
37334-#ifdef CONFIG_X86_64
37335-static __init int vdso_setup(char *s)
37336-{
37337- vdso64_enabled = simple_strtoul(s, NULL, 0);
37338- return 0;
37339-}
37340-__setup("vdso=", vdso_setup);
37341-#endif
37342diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
37343index e88fda8..76ce7ce 100644
37344--- a/arch/x86/xen/Kconfig
37345+++ b/arch/x86/xen/Kconfig
37346@@ -9,6 +9,7 @@ config XEN
37347 select XEN_HAVE_PVMMU
37348 depends on X86_64 || (X86_32 && X86_PAE)
37349 depends on X86_TSC
37350+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
37351 help
37352 This is the Linux Xen port. Enabling this will allow the
37353 kernel to boot in a paravirtualized environment under the
37354diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
37355index ffb101e..98c0ecf 100644
37356--- a/arch/x86/xen/enlighten.c
37357+++ b/arch/x86/xen/enlighten.c
37358@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
37359
37360 struct shared_info xen_dummy_shared_info;
37361
37362-void *xen_initial_gdt;
37363-
37364 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
37365 __read_mostly int xen_have_vector_callback;
37366 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
37367@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
37368 {
37369 unsigned long va = dtr->address;
37370 unsigned int size = dtr->size + 1;
37371- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37372- unsigned long frames[pages];
37373+ unsigned long frames[65536 / PAGE_SIZE];
37374 int f;
37375
37376 /*
37377@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37378 {
37379 unsigned long va = dtr->address;
37380 unsigned int size = dtr->size + 1;
37381- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37382- unsigned long frames[pages];
37383+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
37384 int f;
37385
37386 /*
37387@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37388 * 8-byte entries, or 16 4k pages..
37389 */
37390
37391- BUG_ON(size > 65536);
37392+ BUG_ON(size > GDT_SIZE);
37393 BUG_ON(va & ~PAGE_MASK);
37394
37395 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
37396@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
37397 return 0;
37398 }
37399
37400-static void set_xen_basic_apic_ops(void)
37401+static void __init set_xen_basic_apic_ops(void)
37402 {
37403 apic->read = xen_apic_read;
37404 apic->write = xen_apic_write;
37405@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
37406 #endif
37407 };
37408
37409-static void xen_reboot(int reason)
37410+static __noreturn void xen_reboot(int reason)
37411 {
37412 struct sched_shutdown r = { .reason = reason };
37413
37414- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
37415- BUG();
37416+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
37417+ BUG();
37418 }
37419
37420-static void xen_restart(char *msg)
37421+static __noreturn void xen_restart(char *msg)
37422 {
37423 xen_reboot(SHUTDOWN_reboot);
37424 }
37425
37426-static void xen_emergency_restart(void)
37427+static __noreturn void xen_emergency_restart(void)
37428 {
37429 xen_reboot(SHUTDOWN_reboot);
37430 }
37431
37432-static void xen_machine_halt(void)
37433+static __noreturn void xen_machine_halt(void)
37434 {
37435 xen_reboot(SHUTDOWN_poweroff);
37436 }
37437
37438-static void xen_machine_power_off(void)
37439+static __noreturn void xen_machine_power_off(void)
37440 {
37441 if (pm_power_off)
37442 pm_power_off();
37443@@ -1568,7 +1564,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
37444 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
37445
37446 /* Work out if we support NX */
37447- x86_configure_nx();
37448+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
37449+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
37450+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
37451+ unsigned l, h;
37452+
37453+ __supported_pte_mask |= _PAGE_NX;
37454+ rdmsr(MSR_EFER, l, h);
37455+ l |= EFER_NX;
37456+ wrmsr(MSR_EFER, l, h);
37457+ }
37458+#endif
37459
37460 /* Get mfn list */
37461 xen_build_dynamic_phys_to_machine();
37462@@ -1596,13 +1602,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
37463
37464 machine_ops = xen_machine_ops;
37465
37466- /*
37467- * The only reliable way to retain the initial address of the
37468- * percpu gdt_page is to remember it here, so we can go and
37469- * mark it RW later, when the initial percpu area is freed.
37470- */
37471- xen_initial_gdt = &per_cpu(gdt_page, 0);
37472-
37473 xen_smp_init();
37474
37475 #ifdef CONFIG_ACPI_NUMA
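[Editor's note] Among the enlighten.c changes above, the xen_load_gdt()/xen_load_gdt_boot() hunks replace a variable-length array of frame numbers with a fixed array sized from the architectural 64 KiB GDT limit; grsecurity avoids VLAs so kernel stack usage stays bounded and checkable. A userspace sketch of the idea, with illustrative stand-ins for GDT_SIZE/PAGE_SIZE and a dummy frame lookup:

/* Hedged sketch: worst-case fixed array instead of a VLA. The guard
 * mirrors the patch's BUG_ON(size > GDT_SIZE). */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define GDT_SIZE  65536UL	/* an x86 GDT can never exceed 64 KiB */

static void load_frames(unsigned long size)
{
	/* was: unsigned long frames[(size + PAGE_SIZE - 1) / PAGE_SIZE]; */
	unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
	unsigned long pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

	if (size > GDT_SIZE)		/* BUG_ON() in the kernel */
		return;
	for (unsigned long f = 0; f < pages; f++)
		frames[f] = f;		/* stand-in for virt_to_mfn() */
	printf("pinned %lu of %zu frame slots\n", pages,
	       sizeof(frames) / sizeof(frames[0]));
}

int main(void)
{
	load_frames(3 * PAGE_SIZE + 1);
	return 0;
}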
37476diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
37477index e8a1201..046c66c 100644
37478--- a/arch/x86/xen/mmu.c
37479+++ b/arch/x86/xen/mmu.c
37480@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
37481 return val;
37482 }
37483
37484-static pteval_t pte_pfn_to_mfn(pteval_t val)
37485+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
37486 {
37487 if (val & _PAGE_PRESENT) {
37488 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
37489@@ -1904,6 +1904,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37490 /* L3_k[510] -> level2_kernel_pgt
37491 * L3_i[511] -> level2_fixmap_pgt */
37492 convert_pfn_mfn(level3_kernel_pgt);
37493+ convert_pfn_mfn(level3_vmalloc_start_pgt);
37494+ convert_pfn_mfn(level3_vmalloc_end_pgt);
37495+ convert_pfn_mfn(level3_vmemmap_pgt);
37496 }
37497 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
37498 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
37499@@ -1933,8 +1936,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37500 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
37501 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
37502 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
37503+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
37504+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
37505+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
37506 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
37507 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
37508+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
37509 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
37510 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
37511
37512@@ -2120,6 +2127,7 @@ static void __init xen_post_allocator_init(void)
37513 pv_mmu_ops.set_pud = xen_set_pud;
37514 #if PAGETABLE_LEVELS == 4
37515 pv_mmu_ops.set_pgd = xen_set_pgd;
37516+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
37517 #endif
37518
37519 /* This will work as long as patching hasn't happened yet
37520@@ -2198,6 +2206,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
37521 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
37522 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
37523 .set_pgd = xen_set_pgd_hyper,
37524+ .set_pgd_batched = xen_set_pgd_hyper,
37525
37526 .alloc_pud = xen_alloc_pmd_init,
37527 .release_pud = xen_release_pmd_init,
37528diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
37529index 7005974..54fb05f 100644
37530--- a/arch/x86/xen/smp.c
37531+++ b/arch/x86/xen/smp.c
37532@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
37533
37534 if (xen_pv_domain()) {
37535 if (!xen_feature(XENFEAT_writable_page_tables))
37536- /* We've switched to the "real" per-cpu gdt, so make
37537- * sure the old memory can be recycled. */
37538- make_lowmem_page_readwrite(xen_initial_gdt);
37539-
37540 #ifdef CONFIG_X86_32
37541 /*
37542 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
37543 * expects __USER_DS
37544 */
37545- loadsegment(ds, __USER_DS);
37546- loadsegment(es, __USER_DS);
37547+ loadsegment(ds, __KERNEL_DS);
37548+ loadsegment(es, __KERNEL_DS);
37549 #endif
37550
37551 xen_filter_cpu_maps();
37552@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37553 #ifdef CONFIG_X86_32
37554 /* Note: PVH is not yet supported on x86_32. */
37555 ctxt->user_regs.fs = __KERNEL_PERCPU;
37556- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
37557+ savesegment(gs, ctxt->user_regs.gs);
37558 #endif
37559 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
37560
37561@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37562 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
37563 ctxt->flags = VGCF_IN_KERNEL;
37564 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
37565- ctxt->user_regs.ds = __USER_DS;
37566- ctxt->user_regs.es = __USER_DS;
37567+ ctxt->user_regs.ds = __KERNEL_DS;
37568+ ctxt->user_regs.es = __KERNEL_DS;
37569 ctxt->user_regs.ss = __KERNEL_DS;
37570
37571 xen_copy_trap_info(ctxt->trap_ctxt);
37572@@ -437,14 +433,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
37573 int rc;
37574
37575 per_cpu(current_task, cpu) = idle;
37576+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
37577 #ifdef CONFIG_X86_32
37578 irq_ctx_init(cpu);
37579 #else
37580 clear_tsk_thread_flag(idle, TIF_FORK);
37581 #endif
37582- per_cpu(kernel_stack, cpu) =
37583- (unsigned long)task_stack_page(idle) -
37584- KERNEL_STACK_OFFSET + THREAD_SIZE;
37585+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
37586
37587 xen_setup_runstate_info(cpu);
37588 xen_setup_timer(cpu);
37589@@ -720,7 +715,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
37590
37591 void __init xen_smp_init(void)
37592 {
37593- smp_ops = xen_smp_ops;
37594+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
37595 xen_fill_possible_map();
37596 }
37597
37598diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
37599index fd92a64..1f72641 100644
37600--- a/arch/x86/xen/xen-asm_32.S
37601+++ b/arch/x86/xen/xen-asm_32.S
37602@@ -99,7 +99,7 @@ ENTRY(xen_iret)
37603 pushw %fs
37604 movl $(__KERNEL_PERCPU), %eax
37605 movl %eax, %fs
37606- movl %fs:xen_vcpu, %eax
37607+ mov PER_CPU_VAR(xen_vcpu), %eax
37608 POP_FS
37609 #else
37610 movl %ss:xen_vcpu, %eax
37611diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
37612index 485b695..fda3e7c 100644
37613--- a/arch/x86/xen/xen-head.S
37614+++ b/arch/x86/xen/xen-head.S
37615@@ -39,6 +39,17 @@ ENTRY(startup_xen)
37616 #ifdef CONFIG_X86_32
37617 mov %esi,xen_start_info
37618 mov $init_thread_union+THREAD_SIZE,%esp
37619+#ifdef CONFIG_SMP
37620+ movl $cpu_gdt_table,%edi
37621+ movl $__per_cpu_load,%eax
37622+ movw %ax,__KERNEL_PERCPU + 2(%edi)
37623+ rorl $16,%eax
37624+ movb %al,__KERNEL_PERCPU + 4(%edi)
37625+ movb %ah,__KERNEL_PERCPU + 7(%edi)
37626+ movl $__per_cpu_end - 1,%eax
37627+ subl $__per_cpu_start,%eax
37628+ movw %ax,__KERNEL_PERCPU + 0(%edi)
37629+#endif
37630 #else
37631 mov %rsi,xen_start_info
37632 mov $init_thread_union+THREAD_SIZE,%rsp
37633diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
37634index 97d8765..c4526ec 100644
37635--- a/arch/x86/xen/xen-ops.h
37636+++ b/arch/x86/xen/xen-ops.h
37637@@ -10,8 +10,6 @@
37638 extern const char xen_hypervisor_callback[];
37639 extern const char xen_failsafe_callback[];
37640
37641-extern void *xen_initial_gdt;
37642-
37643 struct trap_info;
37644 void xen_copy_trap_info(struct trap_info *traps);
37645
37646diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
37647index 525bd3d..ef888b1 100644
37648--- a/arch/xtensa/variants/dc232b/include/variant/core.h
37649+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
37650@@ -119,9 +119,9 @@
37651 ----------------------------------------------------------------------*/
37652
37653 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
37654-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
37655 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
37656 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
37657+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37658
37659 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
37660 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
37661diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
37662index 2f33760..835e50a 100644
37663--- a/arch/xtensa/variants/fsf/include/variant/core.h
37664+++ b/arch/xtensa/variants/fsf/include/variant/core.h
37665@@ -11,6 +11,7 @@
37666 #ifndef _XTENSA_CORE_H
37667 #define _XTENSA_CORE_H
37668
37669+#include <linux/const.h>
37670
37671 /****************************************************************************
37672 Parameters Useful for Any Code, USER or PRIVILEGED
37673@@ -112,9 +113,9 @@
37674 ----------------------------------------------------------------------*/
37675
37676 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37677-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37678 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37679 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37680+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37681
37682 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
37683 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
37684diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
37685index af00795..2bb8105 100644
37686--- a/arch/xtensa/variants/s6000/include/variant/core.h
37687+++ b/arch/xtensa/variants/s6000/include/variant/core.h
37688@@ -11,6 +11,7 @@
37689 #ifndef _XTENSA_CORE_CONFIGURATION_H
37690 #define _XTENSA_CORE_CONFIGURATION_H
37691
37692+#include <linux/const.h>
37693
37694 /****************************************************************************
37695 Parameters Useful for Any Code, USER or PRIVILEGED
37696@@ -118,9 +119,9 @@
37697 ----------------------------------------------------------------------*/
37698
37699 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37700-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37701 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37702 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37703+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37704
37705 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
37706 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
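[Editor's note] The three xtensa hunks above all make the same change: XCHAL_DCACHE_LINESIZE is no longer an independent literal but is derived from XCHAL_DCACHE_LINEWIDTH with _AC(), so the size and its log2 cannot drift apart and the constant stays usable from both C and assembly. A sketch with the C-side expansion of _AC() inlined (values illustrative):

/* Hedged sketch: deriving a cache line size from its log2 width keeps
 * the two constants consistent by construction. */
#include <stdio.h>

#define _AC(x, y) x##y		/* C side; the asm side of linux/const.h drops the suffix */

#define DCACHE_LINEWIDTH 5	/* log2(line size in bytes) */
#define DCACHE_LINESIZE  (_AC(1, UL) << DCACHE_LINEWIDTH)

int main(void)
{
	printf("D-cache line: %lu bytes\n", DCACHE_LINESIZE);	/* 32 */
	return 0;
}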
37707diff --git a/block/bio.c b/block/bio.c
37708index 0ec61c9..93b94060 100644
37709--- a/block/bio.c
37710+++ b/block/bio.c
37711@@ -1159,7 +1159,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
37712 /*
37713 * Overflow, abort
37714 */
37715- if (end < start)
37716+ if (end < start || end - start > INT_MAX - nr_pages)
37717 return ERR_PTR(-EINVAL);
37718
37719 nr_pages += end - start;
37720@@ -1293,7 +1293,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
37721 /*
37722 * Overflow, abort
37723 */
37724- if (end < start)
37725+ if (end < start || end - start > INT_MAX - nr_pages)
37726 return ERR_PTR(-EINVAL);
37727
37728 nr_pages += end - start;
37729@@ -1555,7 +1555,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
37730 const int read = bio_data_dir(bio) == READ;
37731 struct bio_map_data *bmd = bio->bi_private;
37732 int i;
37733- char *p = bmd->sgvecs[0].iov_base;
37734+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
37735
37736 bio_for_each_segment_all(bvec, bio, i) {
37737 char *addr = page_address(bvec->bv_page);
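[Editor's note] Both bio.c hunks strengthen the iovec page-range check: "end < start" only catches wraparound within one segment, while the added clause also rejects segments whose page count would overflow the signed nr_pages accumulator across the whole iovec. A sketch of the arithmetic (names local to the example):

/* Hedged sketch: per-segment check that also protects the running
 * accumulator, not just the segment's own start/end. */
#include <limits.h>
#include <stdio.h>

static int add_segment(int nr_pages, unsigned long start, unsigned long end)
{
	if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
		return -1;			/* would wrap or overflow */
	return nr_pages + (int)(end - start);
}

int main(void)
{
	int nr_pages = 0;

	nr_pages = add_segment(nr_pages, 0, INT_MAX / 2);
	printf("after segment 1: %d pages\n", nr_pages);
	/* a second huge segment passes the old "end < start" test alone,
	 * but would overflow the accumulator; the new clause rejects it */
	if (add_segment(nr_pages, 0, (unsigned long)INT_MAX) < 0)
		printf("segment 2 rejected: accumulator would overflow\n");
	return 0;
}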
37738diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
37739index 28d227c..d4c0bad 100644
37740--- a/block/blk-cgroup.c
37741+++ b/block/blk-cgroup.c
37742@@ -822,7 +822,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
37743 static struct cgroup_subsys_state *
37744 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37745 {
37746- static atomic64_t id_seq = ATOMIC64_INIT(0);
37747+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
37748 struct blkcg *blkcg;
37749
37750 if (!parent_css) {
37751@@ -836,7 +836,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37752
37753 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
37754 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
37755- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
37756+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
37757 done:
37758 spin_lock_init(&blkcg->lock);
37759 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
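[Editor's note] This is the first of many atomic_t to atomic_unchecked_t conversions in this patch; the GHES sequence number, libata print IDs, and the ATM driver statistics below follow the same pattern. Under PaX REFCOUNT, ordinary atomic_t operations detect and stop overflows to block refcount-overflow exploits; counters that may legitimately wrap, like this cgroup ID sequence, are switched to the unchecked variants. A C11-atomics analogue of the two behaviours, assuming nothing about the real PaX implementation:

/* Hedged sketch: a checked increment that refuses to wrap (PaX would
 * report and contain the overflow) next to a plain wrapping one for
 * IDs/statistics where overflow is harmless. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static int checked_inc_return(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == INT_MAX) {
			fprintf(stderr, "refcount overflow detected\n");
			return old;
		}
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return old + 1;
}

static int unchecked_inc_return(atomic_int *v)
{
	/* unsigned arithmetic so the +1 past INT_MAX wraps without UB */
	return (int)((unsigned int)atomic_fetch_add(v, 1) + 1u);
}

int main(void)
{
	atomic_int id_seq = INT_MAX;

	printf("checked:   %d\n", checked_inc_return(&id_seq));	/* stays INT_MAX */
	printf("unchecked: %d\n", unchecked_inc_return(&id_seq));	/* wraps */
	return 0;
}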
37760diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
37761index 0736729..2ec3b48 100644
37762--- a/block/blk-iopoll.c
37763+++ b/block/blk-iopoll.c
37764@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
37765 }
37766 EXPORT_SYMBOL(blk_iopoll_complete);
37767
37768-static void blk_iopoll_softirq(struct softirq_action *h)
37769+static __latent_entropy void blk_iopoll_softirq(void)
37770 {
37771 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
37772 int rearm = 0, budget = blk_iopoll_budget;
37773diff --git a/block/blk-map.c b/block/blk-map.c
37774index f890d43..97b0482 100644
37775--- a/block/blk-map.c
37776+++ b/block/blk-map.c
37777@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
37778 if (!len || !kbuf)
37779 return -EINVAL;
37780
37781- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
37782+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
37783 if (do_copy)
37784 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
37785 else
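[Editor's note] object_is_on_stack() becomes object_starts_on_stack() here: blk_rq_map_kern() must bounce any buffer that lives on the kernel stack, since DMA into stack memory is unsafe, and the renamed helper (introduced elsewhere in this patch) tests the buffer's start address against the current stack. A glibc analogue of that test, assuming the GNU-specific pthread_getattr_np() is available; build with -pthread:

/* Hedged sketch: is this pointer's start inside the current thread's
 * stack? Userspace stand-in, not the kernel helper. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static int starts_on_stack(const void *p)
{
	pthread_attr_t attr;
	void *base;
	size_t size;
	uintptr_t a = (uintptr_t)p, b;

	pthread_getattr_np(pthread_self(), &attr);
	pthread_attr_getstack(&attr, &base, &size);
	pthread_attr_destroy(&attr);
	b = (uintptr_t)base;
	return a >= b && a < b + size;
}

int main(void)
{
	char on_stack[64];
	static char off_stack[64];

	printf("stack buffer:  %d\n", starts_on_stack(on_stack));	/* 1 */
	printf("static buffer: %d\n", starts_on_stack(off_stack));	/* 0 */
	return 0;
}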
37786diff --git a/block/blk-softirq.c b/block/blk-softirq.c
37787index 53b1737..08177d2e 100644
37788--- a/block/blk-softirq.c
37789+++ b/block/blk-softirq.c
37790@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
37791 * Softirq action handler - move entries to local list and loop over them
37792 * while passing them to the queue registered handler.
37793 */
37794-static void blk_done_softirq(struct softirq_action *h)
37795+static __latent_entropy void blk_done_softirq(void)
37796 {
37797 struct list_head *cpu_list, local_list;
37798
37799diff --git a/block/bsg.c b/block/bsg.c
37800index ff46add..c4ba8ee 100644
37801--- a/block/bsg.c
37802+++ b/block/bsg.c
37803@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
37804 struct sg_io_v4 *hdr, struct bsg_device *bd,
37805 fmode_t has_write_perm)
37806 {
37807+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37808+ unsigned char *cmdptr;
37809+
37810 if (hdr->request_len > BLK_MAX_CDB) {
37811 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
37812 if (!rq->cmd)
37813 return -ENOMEM;
37814- }
37815+ cmdptr = rq->cmd;
37816+ } else
37817+ cmdptr = tmpcmd;
37818
37819- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
37820+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
37821 hdr->request_len))
37822 return -EFAULT;
37823
37824+ if (cmdptr != rq->cmd)
37825+ memcpy(rq->cmd, cmdptr, hdr->request_len);
37826+
37827 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
37828 if (blk_verify_command(rq->cmd, has_write_perm))
37829 return -EPERM;
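[Editor's note] The bsg hunk above (and the two scsi_ioctl.c hunks further down) introduce the same bounce-buffer pattern: a user-supplied SCSI CDB is first copied into a temporary sized exactly like the request's embedded __cmd[] array and only then memcpy()'d into place, so usercopy hardening can check the copy_from_user() destination against a complete object of known size. A userspace sketch with memcpy() standing in for copy_from_user(); the struct layout and BLK_MAX_CDB value are invented for the example:

/* Hedged sketch of the bounce-buffer pattern, not the real block-layer
 * definitions. */
#include <stdio.h>
#include <string.h>

#define BLK_MAX_CDB 16

struct request {
	unsigned char __cmd[BLK_MAX_CDB];	/* embedded short-command buffer */
	unsigned char *cmd;			/* points at __cmd or a heap buffer */
};

static int fill_cmd(struct request *rq, const unsigned char *user_buf, size_t len)
{
	unsigned char tmpcmd[sizeof(rq->__cmd)];
	unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

	if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
		return -1;			/* long CDBs need the heap path */
	memcpy(cmdptr, user_buf, len);		/* copy_from_user() stand-in */
	if (cmdptr != rq->cmd)			/* bounce into the embedded array */
		memcpy(rq->cmd, cmdptr, len);
	return 0;
}

int main(void)
{
	struct request rq;
	const unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };	/* INQUIRY */

	rq.cmd = rq.__cmd;
	if (fill_cmd(&rq, cdb, sizeof(cdb)) == 0)
		printf("opcode %#x copied via bounce buffer\n", rq.cmd[0]);
	return 0;
}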
37830diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
37831index a0926a6..b2b14b2 100644
37832--- a/block/compat_ioctl.c
37833+++ b/block/compat_ioctl.c
37834@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
37835 cgc = compat_alloc_user_space(sizeof(*cgc));
37836 cgc32 = compat_ptr(arg);
37837
37838- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
37839+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
37840 get_user(data, &cgc32->buffer) ||
37841 put_user(compat_ptr(data), &cgc->buffer) ||
37842 copy_in_user(&cgc->buflen, &cgc32->buflen,
37843@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
37844 err |= __get_user(f->spec1, &uf->spec1);
37845 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
37846 err |= __get_user(name, &uf->name);
37847- f->name = compat_ptr(name);
37848+ f->name = (void __force_kernel *)compat_ptr(name);
37849 if (err) {
37850 err = -EFAULT;
37851 goto out;
37852diff --git a/block/genhd.c b/block/genhd.c
37853index 791f419..89f21c4 100644
37854--- a/block/genhd.c
37855+++ b/block/genhd.c
37856@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
37857
37858 /*
37859 * Register device numbers dev..(dev+range-1)
37860- * range must be nonzero
37861+ * Noop if @range is zero.
37862 * The hash chain is sorted on range, so that subranges can override.
37863 */
37864 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
37865 struct kobject *(*probe)(dev_t, int *, void *),
37866 int (*lock)(dev_t, void *), void *data)
37867 {
37868- kobj_map(bdev_map, devt, range, module, probe, lock, data);
37869+ if (range)
37870+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
37871 }
37872
37873 EXPORT_SYMBOL(blk_register_region);
37874
37875+/* undo blk_register_region(), noop if @range is zero */
37876 void blk_unregister_region(dev_t devt, unsigned long range)
37877 {
37878- kobj_unmap(bdev_map, devt, range);
37879+ if (range)
37880+ kobj_unmap(bdev_map, devt, range);
37881 }
37882
37883 EXPORT_SYMBOL(blk_unregister_region);
37884diff --git a/block/partitions/efi.c b/block/partitions/efi.c
37885index dc51f46..d5446a8 100644
37886--- a/block/partitions/efi.c
37887+++ b/block/partitions/efi.c
37888@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
37889 if (!gpt)
37890 return NULL;
37891
37892+ if (!le32_to_cpu(gpt->num_partition_entries))
37893+ return NULL;
37894+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
37895+ if (!pte)
37896+ return NULL;
37897+
37898 count = le32_to_cpu(gpt->num_partition_entries) *
37899 le32_to_cpu(gpt->sizeof_partition_entry);
37900- if (!count)
37901- return NULL;
37902- pte = kmalloc(count, GFP_KERNEL);
37903- if (!pte)
37904- return NULL;
37905-
37906 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
37907 (u8 *) pte, count) < count) {
37908 kfree(pte);
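[Editor's note] The GPT hunk above swaps a hand-computed count = n * size passed to kmalloc() for kcalloc(n, size), whose multiplication is overflow-checked, and rejects a zero entry count up front; an on-disk header with a huge num_partition_entries could otherwise wrap the product into a small allocation that the following read_lba() overruns. calloc() gives the same guarantee in userspace; the values below are chosen only to force the wrap:

/* Hedged sketch: calloc() rejects n * size overflow; malloc(n * size)
 * silently wraps. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t nmemb = SIZE_MAX / 16 + 1;	/* 2^60 on a 64-bit system */
	size_t size = 32;			/* product wraps to 0 mod 2^64 */

	void *bad = malloc(nmemb * size);	/* tiny/empty allocation */
	void *good = calloc(nmemb, size);	/* overflow detected -> NULL */

	printf("malloc(n*size): %p (undersized if non-NULL)\n", bad);
	printf("calloc(n,size): %p (NULL means overflow caught)\n", good);
	free(bad);
	free(good);
	return 0;
}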
37909diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
37910index 14695c6..27a4636 100644
37911--- a/block/scsi_ioctl.c
37912+++ b/block/scsi_ioctl.c
37913@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
37914 return put_user(0, p);
37915 }
37916
37917-static int sg_get_timeout(struct request_queue *q)
37918+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
37919 {
37920 return jiffies_to_clock_t(q->sg_timeout);
37921 }
37922@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
37923 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
37924 struct sg_io_hdr *hdr, fmode_t mode)
37925 {
37926- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
37927+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37928+ unsigned char *cmdptr;
37929+
37930+ if (rq->cmd != rq->__cmd)
37931+ cmdptr = rq->cmd;
37932+ else
37933+ cmdptr = tmpcmd;
37934+
37935+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
37936 return -EFAULT;
37937+
37938+ if (cmdptr != rq->cmd)
37939+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
37940+
37941 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
37942 return -EPERM;
37943
37944@@ -413,6 +425,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
37945 int err;
37946 unsigned int in_len, out_len, bytes, opcode, cmdlen;
37947 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
37948+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37949+ unsigned char *cmdptr;
37950
37951 if (!sic)
37952 return -EINVAL;
37953@@ -446,9 +460,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
37954 */
37955 err = -EFAULT;
37956 rq->cmd_len = cmdlen;
37957- if (copy_from_user(rq->cmd, sic->data, cmdlen))
37958+
37959+ if (rq->cmd != rq->__cmd)
37960+ cmdptr = rq->cmd;
37961+ else
37962+ cmdptr = tmpcmd;
37963+
37964+ if (copy_from_user(cmdptr, sic->data, cmdlen))
37965 goto error;
37966
37967+ if (rq->cmd != cmdptr)
37968+ memcpy(rq->cmd, cmdptr, cmdlen);
37969+
37970 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
37971 goto error;
37972
37973diff --git a/crypto/cryptd.c b/crypto/cryptd.c
37974index 7bdd61b..afec999 100644
37975--- a/crypto/cryptd.c
37976+++ b/crypto/cryptd.c
37977@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
37978
37979 struct cryptd_blkcipher_request_ctx {
37980 crypto_completion_t complete;
37981-};
37982+} __no_const;
37983
37984 struct cryptd_hash_ctx {
37985 struct crypto_shash *child;
37986@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
37987
37988 struct cryptd_aead_request_ctx {
37989 crypto_completion_t complete;
37990-};
37991+} __no_const;
37992
37993 static void cryptd_queue_worker(struct work_struct *work);
37994
37995diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
37996index 309d345..1632720 100644
37997--- a/crypto/pcrypt.c
37998+++ b/crypto/pcrypt.c
37999@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
38000 int ret;
38001
38002 pinst->kobj.kset = pcrypt_kset;
38003- ret = kobject_add(&pinst->kobj, NULL, name);
38004+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
38005 if (!ret)
38006 kobject_uevent(&pinst->kobj, KOBJ_ADD);
38007
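[Editor's note] The pcrypt hunk is a classic format-string fix: the trailing arguments of kobject_add() are a printf-style format, so passing a caller-controlled name directly lets any % sequences in it be interpreted. Passing "%s" and the name as data closes that off. The shape of the bug and the fix, in plain printf terms:

/* Hedged sketch: never pass externally influenced strings as the
 * format argument of a printf-style API. */
#include <stdio.h>

int main(void)
{
	const char *name = "pcrypt_%x_%n";	/* imagine attacker-influenced */

	/* printf(name); */	/* bug shape: %x/%n parsed against stray args */
	printf("%s\n", name);	/* fixed shape: name treated as data */
	return 0;
}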
38008diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
38009index 6921c7f..78e1af7 100644
38010--- a/drivers/acpi/acpica/hwxfsleep.c
38011+++ b/drivers/acpi/acpica/hwxfsleep.c
38012@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
38013 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
38014
38015 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
38016- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
38017- acpi_hw_extended_sleep},
38018- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
38019- acpi_hw_extended_wake_prep},
38020- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
38021+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
38022+ .extended_function = acpi_hw_extended_sleep},
38023+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
38024+ .extended_function = acpi_hw_extended_wake_prep},
38025+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
38026+ .extended_function = acpi_hw_extended_wake}
38027 };
38028
38029 /*
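[Editor's note] The hwxfsleep.c hunk converts the sleep dispatch table from positional to designated initializers. With grsecurity's structure-layout plugins, field order is no longer guaranteed, and designated initializers keep each function pointer attached to the right member no matter how the struct is laid out. A minimal sketch, with names modelled loosely on the ACPI table rather than the real prototypes:

/* Hedged sketch: designated initializers survive field reordering;
 * positional ones silently misassign. */
#include <stdio.h>

struct sleep_functions {
	int (*legacy_function)(int);
	int (*extended_function)(int);
};

static int legacy_sleep(int state)   { return state; }
static int extended_sleep(int state) { return state + 100; }

static const struct sleep_functions dispatch[] = {
	{ .legacy_function = legacy_sleep,
	  .extended_function = extended_sleep },
};

int main(void)
{
	printf("legacy=%d extended=%d\n",
	       dispatch[0].legacy_function(3), dispatch[0].extended_function(3));
	return 0;
}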
38030diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
38031index e5bcd91..74f050d 100644
38032--- a/drivers/acpi/apei/apei-internal.h
38033+++ b/drivers/acpi/apei/apei-internal.h
38034@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
38035 struct apei_exec_ins_type {
38036 u32 flags;
38037 apei_exec_ins_func_t run;
38038-};
38039+} __do_const;
38040
38041 struct apei_exec_context {
38042 u32 ip;
38043diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
38044index dab7cb7..f0d2994 100644
38045--- a/drivers/acpi/apei/ghes.c
38046+++ b/drivers/acpi/apei/ghes.c
38047@@ -500,7 +500,7 @@ static void __ghes_print_estatus(const char *pfx,
38048 const struct acpi_hest_generic *generic,
38049 const struct acpi_generic_status *estatus)
38050 {
38051- static atomic_t seqno;
38052+ static atomic_unchecked_t seqno;
38053 unsigned int curr_seqno;
38054 char pfx_seq[64];
38055
38056@@ -511,7 +511,7 @@ static void __ghes_print_estatus(const char *pfx,
38057 else
38058 pfx = KERN_ERR;
38059 }
38060- curr_seqno = atomic_inc_return(&seqno);
38061+ curr_seqno = atomic_inc_return_unchecked(&seqno);
38062 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
38063 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
38064 pfx_seq, generic->header.source_id);
38065diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
38066index a83e3c6..c3d617f 100644
38067--- a/drivers/acpi/bgrt.c
38068+++ b/drivers/acpi/bgrt.c
38069@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
38070 if (!bgrt_image)
38071 return -ENODEV;
38072
38073- bin_attr_image.private = bgrt_image;
38074- bin_attr_image.size = bgrt_image_size;
38075+ pax_open_kernel();
38076+ *(void **)&bin_attr_image.private = bgrt_image;
38077+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
38078+ pax_close_kernel();
38079
38080 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
38081 if (!bgrt_kobj)
38082diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
38083index 3d8413d..95f638c 100644
38084--- a/drivers/acpi/blacklist.c
38085+++ b/drivers/acpi/blacklist.c
38086@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
38087 u32 is_critical_error;
38088 };
38089
38090-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
38091+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
38092
38093 /*
38094 * POLICY: If *anything* doesn't work, put it on the blacklist.
38095@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
38096 return 0;
38097 }
38098
38099-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
38100+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
38101 {
38102 .callback = dmi_disable_osi_vista,
38103 .ident = "Fujitsu Siemens",
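[Editor's note] acpi_osi_dmi_table moves from __initdata to const ... __initconst above: the match table is never written after build time, so it can live in read-only init memory, where a stray write faults instead of silently rewriting OSI policy. Plain const is the nearest userspace analogue; the table contents here are invented for illustration:

/* Hedged sketch: const match tables land in .rodata and cannot be
 * corrupted by stray writes. */
#include <stdio.h>
#include <string.h>

struct dmi_match {
	const char *ident;
	int disable_osi;
};

static const struct dmi_match osi_table[] = {
	{ "Fujitsu Siemens", 1 },
	{ NULL, 0 },
};

int main(void)
{
	for (const struct dmi_match *m = osi_table; m->ident; m++)
		if (!strcmp(m->ident, "Fujitsu Siemens"))
			printf("OSI quirk active for %s\n", m->ident);
	return 0;
}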
38104diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
38105index c68e724..e863008 100644
38106--- a/drivers/acpi/custom_method.c
38107+++ b/drivers/acpi/custom_method.c
38108@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
38109 struct acpi_table_header table;
38110 acpi_status status;
38111
38112+#ifdef CONFIG_GRKERNSEC_KMEM
38113+ return -EPERM;
38114+#endif
38115+
38116 if (!(*ppos)) {
38117 /* parse the table header to get the table length */
38118 if (count <= sizeof(struct acpi_table_header))
38119diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
38120index 3dca36d..abaf070 100644
38121--- a/drivers/acpi/processor_idle.c
38122+++ b/drivers/acpi/processor_idle.c
38123@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
38124 {
38125 int i, count = CPUIDLE_DRIVER_STATE_START;
38126 struct acpi_processor_cx *cx;
38127- struct cpuidle_state *state;
38128+ cpuidle_state_no_const *state;
38129 struct cpuidle_driver *drv = &acpi_idle_driver;
38130
38131 if (!pr->flags.power_setup_done)
38132diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
38133index 38cb978..352c761 100644
38134--- a/drivers/acpi/sysfs.c
38135+++ b/drivers/acpi/sysfs.c
38136@@ -423,11 +423,11 @@ static u32 num_counters;
38137 static struct attribute **all_attrs;
38138 static u32 acpi_gpe_count;
38139
38140-static struct attribute_group interrupt_stats_attr_group = {
38141+static attribute_group_no_const interrupt_stats_attr_group = {
38142 .name = "interrupts",
38143 };
38144
38145-static struct kobj_attribute *counter_attrs;
38146+static kobj_attribute_no_const *counter_attrs;
38147
38148 static void delete_gpe_attr_array(void)
38149 {
38150diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
38151index d72ce04..d6ab3c2 100644
38152--- a/drivers/ata/libahci.c
38153+++ b/drivers/ata/libahci.c
38154@@ -1257,7 +1257,7 @@ int ahci_kick_engine(struct ata_port *ap)
38155 }
38156 EXPORT_SYMBOL_GPL(ahci_kick_engine);
38157
38158-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38159+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38160 struct ata_taskfile *tf, int is_cmd, u16 flags,
38161 unsigned long timeout_msec)
38162 {
38163diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
38164index 677c0c1..354b89b 100644
38165--- a/drivers/ata/libata-core.c
38166+++ b/drivers/ata/libata-core.c
38167@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
38168 static void ata_dev_xfermask(struct ata_device *dev);
38169 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
38170
38171-atomic_t ata_print_id = ATOMIC_INIT(0);
38172+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
38173
38174 struct ata_force_param {
38175 const char *name;
38176@@ -4863,7 +4863,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
38177 struct ata_port *ap;
38178 unsigned int tag;
38179
38180- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38181+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38182 ap = qc->ap;
38183
38184 qc->flags = 0;
38185@@ -4879,7 +4879,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
38186 struct ata_port *ap;
38187 struct ata_link *link;
38188
38189- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38190+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38191 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
38192 ap = qc->ap;
38193 link = qc->dev->link;
38194@@ -5983,6 +5983,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38195 return;
38196
38197 spin_lock(&lock);
38198+ pax_open_kernel();
38199
38200 for (cur = ops->inherits; cur; cur = cur->inherits) {
38201 void **inherit = (void **)cur;
38202@@ -5996,8 +5997,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38203 if (IS_ERR(*pp))
38204 *pp = NULL;
38205
38206- ops->inherits = NULL;
38207+ *(struct ata_port_operations **)&ops->inherits = NULL;
38208
38209+ pax_close_kernel();
38210 spin_unlock(&lock);
38211 }
38212
38213@@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
38214
38215 /* give ports names and add SCSI hosts */
38216 for (i = 0; i < host->n_ports; i++) {
38217- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
38218+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
38219 host->ports[i]->local_port_no = i + 1;
38220 }
38221
38222diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
38223index 72691fd..ad104c0 100644
38224--- a/drivers/ata/libata-scsi.c
38225+++ b/drivers/ata/libata-scsi.c
38226@@ -4151,7 +4151,7 @@ int ata_sas_port_init(struct ata_port *ap)
38227
38228 if (rc)
38229 return rc;
38230- ap->print_id = atomic_inc_return(&ata_print_id);
38231+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
38232 return 0;
38233 }
38234 EXPORT_SYMBOL_GPL(ata_sas_port_init);
38235diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
38236index 45b5ab3..98446b8 100644
38237--- a/drivers/ata/libata.h
38238+++ b/drivers/ata/libata.h
38239@@ -53,7 +53,7 @@ enum {
38240 ATA_DNXFER_QUIET = (1 << 31),
38241 };
38242
38243-extern atomic_t ata_print_id;
38244+extern atomic_unchecked_t ata_print_id;
38245 extern int atapi_passthru16;
38246 extern int libata_fua;
38247 extern int libata_noacpi;
38248diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
38249index 4edb1a8..84e1658 100644
38250--- a/drivers/ata/pata_arasan_cf.c
38251+++ b/drivers/ata/pata_arasan_cf.c
38252@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
38253 /* Handle platform specific quirks */
38254 if (quirk) {
38255 if (quirk & CF_BROKEN_PIO) {
38256- ap->ops->set_piomode = NULL;
38257+ pax_open_kernel();
38258+ *(void **)&ap->ops->set_piomode = NULL;
38259+ pax_close_kernel();
38260 ap->pio_mask = 0;
38261 }
38262 if (quirk & CF_BROKEN_MWDMA)
38263diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
38264index f9b983a..887b9d8 100644
38265--- a/drivers/atm/adummy.c
38266+++ b/drivers/atm/adummy.c
38267@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
38268 vcc->pop(vcc, skb);
38269 else
38270 dev_kfree_skb_any(skb);
38271- atomic_inc(&vcc->stats->tx);
38272+ atomic_inc_unchecked(&vcc->stats->tx);
38273
38274 return 0;
38275 }
38276diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
38277index f1a9198..f466a4a 100644
38278--- a/drivers/atm/ambassador.c
38279+++ b/drivers/atm/ambassador.c
38280@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
38281 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
38282
38283 // VC layer stats
38284- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38285+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38286
38287 // free the descriptor
38288 kfree (tx_descr);
38289@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38290 dump_skb ("<<<", vc, skb);
38291
38292 // VC layer stats
38293- atomic_inc(&atm_vcc->stats->rx);
38294+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38295 __net_timestamp(skb);
38296 // end of our responsibility
38297 atm_vcc->push (atm_vcc, skb);
38298@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38299 } else {
38300 PRINTK (KERN_INFO, "dropped over-size frame");
38301 // should we count this?
38302- atomic_inc(&atm_vcc->stats->rx_drop);
38303+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38304 }
38305
38306 } else {
38307@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
38308 }
38309
38310 if (check_area (skb->data, skb->len)) {
38311- atomic_inc(&atm_vcc->stats->tx_err);
38312+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
38313 return -ENOMEM; // ?
38314 }
38315
38316diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
38317index 0e3f8f9..765a7a5 100644
38318--- a/drivers/atm/atmtcp.c
38319+++ b/drivers/atm/atmtcp.c
38320@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38321 if (vcc->pop) vcc->pop(vcc,skb);
38322 else dev_kfree_skb(skb);
38323 if (dev_data) return 0;
38324- atomic_inc(&vcc->stats->tx_err);
38325+ atomic_inc_unchecked(&vcc->stats->tx_err);
38326 return -ENOLINK;
38327 }
38328 size = skb->len+sizeof(struct atmtcp_hdr);
38329@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38330 if (!new_skb) {
38331 if (vcc->pop) vcc->pop(vcc,skb);
38332 else dev_kfree_skb(skb);
38333- atomic_inc(&vcc->stats->tx_err);
38334+ atomic_inc_unchecked(&vcc->stats->tx_err);
38335 return -ENOBUFS;
38336 }
38337 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
38338@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38339 if (vcc->pop) vcc->pop(vcc,skb);
38340 else dev_kfree_skb(skb);
38341 out_vcc->push(out_vcc,new_skb);
38342- atomic_inc(&vcc->stats->tx);
38343- atomic_inc(&out_vcc->stats->rx);
38344+ atomic_inc_unchecked(&vcc->stats->tx);
38345+ atomic_inc_unchecked(&out_vcc->stats->rx);
38346 return 0;
38347 }
38348
38349@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38350 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
38351 read_unlock(&vcc_sklist_lock);
38352 if (!out_vcc) {
38353- atomic_inc(&vcc->stats->tx_err);
38354+ atomic_inc_unchecked(&vcc->stats->tx_err);
38355 goto done;
38356 }
38357 skb_pull(skb,sizeof(struct atmtcp_hdr));
38358@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38359 __net_timestamp(new_skb);
38360 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
38361 out_vcc->push(out_vcc,new_skb);
38362- atomic_inc(&vcc->stats->tx);
38363- atomic_inc(&out_vcc->stats->rx);
38364+ atomic_inc_unchecked(&vcc->stats->tx);
38365+ atomic_inc_unchecked(&out_vcc->stats->rx);
38366 done:
38367 if (vcc->pop) vcc->pop(vcc,skb);
38368 else dev_kfree_skb(skb);
38369diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
38370index b1955ba..b179940 100644
38371--- a/drivers/atm/eni.c
38372+++ b/drivers/atm/eni.c
38373@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
38374 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
38375 vcc->dev->number);
38376 length = 0;
38377- atomic_inc(&vcc->stats->rx_err);
38378+ atomic_inc_unchecked(&vcc->stats->rx_err);
38379 }
38380 else {
38381 length = ATM_CELL_SIZE-1; /* no HEC */
38382@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38383 size);
38384 }
38385 eff = length = 0;
38386- atomic_inc(&vcc->stats->rx_err);
38387+ atomic_inc_unchecked(&vcc->stats->rx_err);
38388 }
38389 else {
38390 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
38391@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38392 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
38393 vcc->dev->number,vcc->vci,length,size << 2,descr);
38394 length = eff = 0;
38395- atomic_inc(&vcc->stats->rx_err);
38396+ atomic_inc_unchecked(&vcc->stats->rx_err);
38397 }
38398 }
38399 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
38400@@ -767,7 +767,7 @@ rx_dequeued++;
38401 vcc->push(vcc,skb);
38402 pushed++;
38403 }
38404- atomic_inc(&vcc->stats->rx);
38405+ atomic_inc_unchecked(&vcc->stats->rx);
38406 }
38407 wake_up(&eni_dev->rx_wait);
38408 }
38409@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
38410 PCI_DMA_TODEVICE);
38411 if (vcc->pop) vcc->pop(vcc,skb);
38412 else dev_kfree_skb_irq(skb);
38413- atomic_inc(&vcc->stats->tx);
38414+ atomic_inc_unchecked(&vcc->stats->tx);
38415 wake_up(&eni_dev->tx_wait);
38416 dma_complete++;
38417 }
38418diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
38419index 82f2ae0..f205c02 100644
38420--- a/drivers/atm/firestream.c
38421+++ b/drivers/atm/firestream.c
38422@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
38423 }
38424 }
38425
38426- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38427+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38428
38429 fs_dprintk (FS_DEBUG_TXMEM, "i");
38430 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
38431@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38432 #endif
38433 skb_put (skb, qe->p1 & 0xffff);
38434 ATM_SKB(skb)->vcc = atm_vcc;
38435- atomic_inc(&atm_vcc->stats->rx);
38436+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38437 __net_timestamp(skb);
38438 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
38439 atm_vcc->push (atm_vcc, skb);
38440@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38441 kfree (pe);
38442 }
38443 if (atm_vcc)
38444- atomic_inc(&atm_vcc->stats->rx_drop);
38445+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38446 break;
38447 case 0x1f: /* Reassembly abort: no buffers. */
38448 /* Silently increment error counter. */
38449 if (atm_vcc)
38450- atomic_inc(&atm_vcc->stats->rx_drop);
38451+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38452 break;
38453 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
38454 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
38455diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
38456index d4725fc..2d4ea65 100644
38457--- a/drivers/atm/fore200e.c
38458+++ b/drivers/atm/fore200e.c
38459@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
38460 #endif
38461 /* check error condition */
38462 if (*entry->status & STATUS_ERROR)
38463- atomic_inc(&vcc->stats->tx_err);
38464+ atomic_inc_unchecked(&vcc->stats->tx_err);
38465 else
38466- atomic_inc(&vcc->stats->tx);
38467+ atomic_inc_unchecked(&vcc->stats->tx);
38468 }
38469 }
38470
38471@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38472 if (skb == NULL) {
38473 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
38474
38475- atomic_inc(&vcc->stats->rx_drop);
38476+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38477 return -ENOMEM;
38478 }
38479
38480@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38481
38482 dev_kfree_skb_any(skb);
38483
38484- atomic_inc(&vcc->stats->rx_drop);
38485+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38486 return -ENOMEM;
38487 }
38488
38489 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38490
38491 vcc->push(vcc, skb);
38492- atomic_inc(&vcc->stats->rx);
38493+ atomic_inc_unchecked(&vcc->stats->rx);
38494
38495 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38496
38497@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
38498 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
38499 fore200e->atm_dev->number,
38500 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
38501- atomic_inc(&vcc->stats->rx_err);
38502+ atomic_inc_unchecked(&vcc->stats->rx_err);
38503 }
38504 }
38505
38506@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
38507 goto retry_here;
38508 }
38509
38510- atomic_inc(&vcc->stats->tx_err);
38511+ atomic_inc_unchecked(&vcc->stats->tx_err);
38512
38513 fore200e->tx_sat++;
38514 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
38515diff --git a/drivers/atm/he.c b/drivers/atm/he.c
38516index aa6be26..f70a785 100644
38517--- a/drivers/atm/he.c
38518+++ b/drivers/atm/he.c
38519@@ -1690,7 +1690,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38520
38521 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
38522 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
38523- atomic_inc(&vcc->stats->rx_drop);
38524+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38525 goto return_host_buffers;
38526 }
38527
38528@@ -1717,7 +1717,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38529 RBRQ_LEN_ERR(he_dev->rbrq_head)
38530 ? "LEN_ERR" : "",
38531 vcc->vpi, vcc->vci);
38532- atomic_inc(&vcc->stats->rx_err);
38533+ atomic_inc_unchecked(&vcc->stats->rx_err);
38534 goto return_host_buffers;
38535 }
38536
38537@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38538 vcc->push(vcc, skb);
38539 spin_lock(&he_dev->global_lock);
38540
38541- atomic_inc(&vcc->stats->rx);
38542+ atomic_inc_unchecked(&vcc->stats->rx);
38543
38544 return_host_buffers:
38545 ++pdus_assembled;
38546@@ -2095,7 +2095,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
38547 tpd->vcc->pop(tpd->vcc, tpd->skb);
38548 else
38549 dev_kfree_skb_any(tpd->skb);
38550- atomic_inc(&tpd->vcc->stats->tx_err);
38551+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
38552 }
38553 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
38554 return;
38555@@ -2507,7 +2507,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38556 vcc->pop(vcc, skb);
38557 else
38558 dev_kfree_skb_any(skb);
38559- atomic_inc(&vcc->stats->tx_err);
38560+ atomic_inc_unchecked(&vcc->stats->tx_err);
38561 return -EINVAL;
38562 }
38563
38564@@ -2518,7 +2518,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38565 vcc->pop(vcc, skb);
38566 else
38567 dev_kfree_skb_any(skb);
38568- atomic_inc(&vcc->stats->tx_err);
38569+ atomic_inc_unchecked(&vcc->stats->tx_err);
38570 return -EINVAL;
38571 }
38572 #endif
38573@@ -2530,7 +2530,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38574 vcc->pop(vcc, skb);
38575 else
38576 dev_kfree_skb_any(skb);
38577- atomic_inc(&vcc->stats->tx_err);
38578+ atomic_inc_unchecked(&vcc->stats->tx_err);
38579 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38580 return -ENOMEM;
38581 }
38582@@ -2572,7 +2572,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38583 vcc->pop(vcc, skb);
38584 else
38585 dev_kfree_skb_any(skb);
38586- atomic_inc(&vcc->stats->tx_err);
38587+ atomic_inc_unchecked(&vcc->stats->tx_err);
38588 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38589 return -ENOMEM;
38590 }
38591@@ -2603,7 +2603,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38592 __enqueue_tpd(he_dev, tpd, cid);
38593 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38594
38595- atomic_inc(&vcc->stats->tx);
38596+ atomic_inc_unchecked(&vcc->stats->tx);
38597
38598 return 0;
38599 }
38600diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
38601index 1dc0519..1aadaf7 100644
38602--- a/drivers/atm/horizon.c
38603+++ b/drivers/atm/horizon.c
38604@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
38605 {
38606 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
38607 // VC layer stats
38608- atomic_inc(&vcc->stats->rx);
38609+ atomic_inc_unchecked(&vcc->stats->rx);
38610 __net_timestamp(skb);
38611 // end of our responsibility
38612 vcc->push (vcc, skb);
38613@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
38614 dev->tx_iovec = NULL;
38615
38616 // VC layer stats
38617- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38618+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38619
38620 // free the skb
38621 hrz_kfree_skb (skb);
38622diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
38623index b621f56..1e3a799 100644
38624--- a/drivers/atm/idt77252.c
38625+++ b/drivers/atm/idt77252.c
38626@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
38627 else
38628 dev_kfree_skb(skb);
38629
38630- atomic_inc(&vcc->stats->tx);
38631+ atomic_inc_unchecked(&vcc->stats->tx);
38632 }
38633
38634 atomic_dec(&scq->used);
38635@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38636 if ((sb = dev_alloc_skb(64)) == NULL) {
38637 printk("%s: Can't allocate buffers for aal0.\n",
38638 card->name);
38639- atomic_add(i, &vcc->stats->rx_drop);
38640+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38641 break;
38642 }
38643 if (!atm_charge(vcc, sb->truesize)) {
38644 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
38645 card->name);
38646- atomic_add(i - 1, &vcc->stats->rx_drop);
38647+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
38648 dev_kfree_skb(sb);
38649 break;
38650 }
38651@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38652 ATM_SKB(sb)->vcc = vcc;
38653 __net_timestamp(sb);
38654 vcc->push(vcc, sb);
38655- atomic_inc(&vcc->stats->rx);
38656+ atomic_inc_unchecked(&vcc->stats->rx);
38657
38658 cell += ATM_CELL_PAYLOAD;
38659 }
38660@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38661 "(CDC: %08x)\n",
38662 card->name, len, rpp->len, readl(SAR_REG_CDC));
38663 recycle_rx_pool_skb(card, rpp);
38664- atomic_inc(&vcc->stats->rx_err);
38665+ atomic_inc_unchecked(&vcc->stats->rx_err);
38666 return;
38667 }
38668 if (stat & SAR_RSQE_CRC) {
38669 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
38670 recycle_rx_pool_skb(card, rpp);
38671- atomic_inc(&vcc->stats->rx_err);
38672+ atomic_inc_unchecked(&vcc->stats->rx_err);
38673 return;
38674 }
38675 if (skb_queue_len(&rpp->queue) > 1) {
38676@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38677 RXPRINTK("%s: Can't alloc RX skb.\n",
38678 card->name);
38679 recycle_rx_pool_skb(card, rpp);
38680- atomic_inc(&vcc->stats->rx_err);
38681+ atomic_inc_unchecked(&vcc->stats->rx_err);
38682 return;
38683 }
38684 if (!atm_charge(vcc, skb->truesize)) {
38685@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38686 __net_timestamp(skb);
38687
38688 vcc->push(vcc, skb);
38689- atomic_inc(&vcc->stats->rx);
38690+ atomic_inc_unchecked(&vcc->stats->rx);
38691
38692 return;
38693 }
38694@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38695 __net_timestamp(skb);
38696
38697 vcc->push(vcc, skb);
38698- atomic_inc(&vcc->stats->rx);
38699+ atomic_inc_unchecked(&vcc->stats->rx);
38700
38701 if (skb->truesize > SAR_FB_SIZE_3)
38702 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
38703@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
38704 if (vcc->qos.aal != ATM_AAL0) {
38705 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
38706 card->name, vpi, vci);
38707- atomic_inc(&vcc->stats->rx_drop);
38708+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38709 goto drop;
38710 }
38711
38712 if ((sb = dev_alloc_skb(64)) == NULL) {
38713 printk("%s: Can't allocate buffers for AAL0.\n",
38714 card->name);
38715- atomic_inc(&vcc->stats->rx_err);
38716+ atomic_inc_unchecked(&vcc->stats->rx_err);
38717 goto drop;
38718 }
38719
38720@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
38721 ATM_SKB(sb)->vcc = vcc;
38722 __net_timestamp(sb);
38723 vcc->push(vcc, sb);
38724- atomic_inc(&vcc->stats->rx);
38725+ atomic_inc_unchecked(&vcc->stats->rx);
38726
38727 drop:
38728 skb_pull(queue, 64);
38729@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38730
38731 if (vc == NULL) {
38732 printk("%s: NULL connection in send().\n", card->name);
38733- atomic_inc(&vcc->stats->tx_err);
38734+ atomic_inc_unchecked(&vcc->stats->tx_err);
38735 dev_kfree_skb(skb);
38736 return -EINVAL;
38737 }
38738 if (!test_bit(VCF_TX, &vc->flags)) {
38739 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
38740- atomic_inc(&vcc->stats->tx_err);
38741+ atomic_inc_unchecked(&vcc->stats->tx_err);
38742 dev_kfree_skb(skb);
38743 return -EINVAL;
38744 }
38745@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38746 break;
38747 default:
38748 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
38749- atomic_inc(&vcc->stats->tx_err);
38750+ atomic_inc_unchecked(&vcc->stats->tx_err);
38751 dev_kfree_skb(skb);
38752 return -EINVAL;
38753 }
38754
38755 if (skb_shinfo(skb)->nr_frags != 0) {
38756 printk("%s: No scatter-gather yet.\n", card->name);
38757- atomic_inc(&vcc->stats->tx_err);
38758+ atomic_inc_unchecked(&vcc->stats->tx_err);
38759 dev_kfree_skb(skb);
38760 return -EINVAL;
38761 }
38762@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38763
38764 err = queue_skb(card, vc, skb, oam);
38765 if (err) {
38766- atomic_inc(&vcc->stats->tx_err);
38767+ atomic_inc_unchecked(&vcc->stats->tx_err);
38768 dev_kfree_skb(skb);
38769 return err;
38770 }
38771@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
38772 skb = dev_alloc_skb(64);
38773 if (!skb) {
38774 printk("%s: Out of memory in send_oam().\n", card->name);
38775- atomic_inc(&vcc->stats->tx_err);
38776+ atomic_inc_unchecked(&vcc->stats->tx_err);
38777 return -ENOMEM;
38778 }
38779 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
38780diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
38781index 4217f29..88f547a 100644
38782--- a/drivers/atm/iphase.c
38783+++ b/drivers/atm/iphase.c
38784@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
38785 status = (u_short) (buf_desc_ptr->desc_mode);
38786 if (status & (RX_CER | RX_PTE | RX_OFL))
38787 {
38788- atomic_inc(&vcc->stats->rx_err);
38789+ atomic_inc_unchecked(&vcc->stats->rx_err);
38790 IF_ERR(printk("IA: bad packet, dropping it");)
38791 if (status & RX_CER) {
38792 IF_ERR(printk(" cause: packet CRC error\n");)
38793@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
38794 len = dma_addr - buf_addr;
38795 if (len > iadev->rx_buf_sz) {
38796 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
38797- atomic_inc(&vcc->stats->rx_err);
38798+ atomic_inc_unchecked(&vcc->stats->rx_err);
38799 goto out_free_desc;
38800 }
38801
38802@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38803 ia_vcc = INPH_IA_VCC(vcc);
38804 if (ia_vcc == NULL)
38805 {
38806- atomic_inc(&vcc->stats->rx_err);
38807+ atomic_inc_unchecked(&vcc->stats->rx_err);
38808 atm_return(vcc, skb->truesize);
38809 dev_kfree_skb_any(skb);
38810 goto INCR_DLE;
38811@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38812 if ((length > iadev->rx_buf_sz) || (length >
38813 (skb->len - sizeof(struct cpcs_trailer))))
38814 {
38815- atomic_inc(&vcc->stats->rx_err);
38816+ atomic_inc_unchecked(&vcc->stats->rx_err);
38817 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
38818 length, skb->len);)
38819 atm_return(vcc, skb->truesize);
38820@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38821
38822 IF_RX(printk("rx_dle_intr: skb push");)
38823 vcc->push(vcc,skb);
38824- atomic_inc(&vcc->stats->rx);
38825+ atomic_inc_unchecked(&vcc->stats->rx);
38826 iadev->rx_pkt_cnt++;
38827 }
38828 INCR_DLE:
38829@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
38830 {
38831 struct k_sonet_stats *stats;
38832 stats = &PRIV(_ia_dev[board])->sonet_stats;
38833- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
38834- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
38835- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
38836- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
38837- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
38838- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
38839- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
38840- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
38841- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
38842+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
38843+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
38844+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
38845+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
38846+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
38847+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
38848+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
38849+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
38850+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
38851 }
38852 ia_cmds.status = 0;
38853 break;
38854@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38855 if ((desc == 0) || (desc > iadev->num_tx_desc))
38856 {
38857 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
38858- atomic_inc(&vcc->stats->tx);
38859+ atomic_inc_unchecked(&vcc->stats->tx);
38860 if (vcc->pop)
38861 vcc->pop(vcc, skb);
38862 else
38863@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38864 ATM_DESC(skb) = vcc->vci;
38865 skb_queue_tail(&iadev->tx_dma_q, skb);
38866
38867- atomic_inc(&vcc->stats->tx);
38868+ atomic_inc_unchecked(&vcc->stats->tx);
38869 iadev->tx_pkt_cnt++;
38870 /* Increment transaction counter */
38871 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
38872
38873 #if 0
38874 /* add flow control logic */
38875- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
38876+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
38877 if (iavcc->vc_desc_cnt > 10) {
38878 vcc->tx_quota = vcc->tx_quota * 3 / 4;
38879 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
38880diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
38881index fa7d7019..1e404c7 100644
38882--- a/drivers/atm/lanai.c
38883+++ b/drivers/atm/lanai.c
38884@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
38885 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
38886 lanai_endtx(lanai, lvcc);
38887 lanai_free_skb(lvcc->tx.atmvcc, skb);
38888- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
38889+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
38890 }
38891
38892 /* Try to fill the buffer - don't call unless there is backlog */
38893@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
38894 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
38895 __net_timestamp(skb);
38896 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
38897- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
38898+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
38899 out:
38900 lvcc->rx.buf.ptr = end;
38901 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
38902@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38903 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
38904 "vcc %d\n", lanai->number, (unsigned int) s, vci);
38905 lanai->stats.service_rxnotaal5++;
38906- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38907+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38908 return 0;
38909 }
38910 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
38911@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38912 int bytes;
38913 read_unlock(&vcc_sklist_lock);
38914 DPRINTK("got trashed rx pdu on vci %d\n", vci);
38915- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38916+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38917 lvcc->stats.x.aal5.service_trash++;
38918 bytes = (SERVICE_GET_END(s) * 16) -
38919 (((unsigned long) lvcc->rx.buf.ptr) -
38920@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38921 }
38922 if (s & SERVICE_STREAM) {
38923 read_unlock(&vcc_sklist_lock);
38924- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38925+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38926 lvcc->stats.x.aal5.service_stream++;
38927 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
38928 "PDU on VCI %d!\n", lanai->number, vci);
38929@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38930 return 0;
38931 }
38932 DPRINTK("got rx crc error on vci %d\n", vci);
38933- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38934+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38935 lvcc->stats.x.aal5.service_rxcrc++;
38936 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
38937 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
38938diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
38939index 9988ac9..7c52585 100644
38940--- a/drivers/atm/nicstar.c
38941+++ b/drivers/atm/nicstar.c
38942@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38943 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
38944 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
38945 card->index);
38946- atomic_inc(&vcc->stats->tx_err);
38947+ atomic_inc_unchecked(&vcc->stats->tx_err);
38948 dev_kfree_skb_any(skb);
38949 return -EINVAL;
38950 }
38951@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38952 if (!vc->tx) {
38953 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
38954 card->index);
38955- atomic_inc(&vcc->stats->tx_err);
38956+ atomic_inc_unchecked(&vcc->stats->tx_err);
38957 dev_kfree_skb_any(skb);
38958 return -EINVAL;
38959 }
38960@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38961 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
38962 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
38963 card->index);
38964- atomic_inc(&vcc->stats->tx_err);
38965+ atomic_inc_unchecked(&vcc->stats->tx_err);
38966 dev_kfree_skb_any(skb);
38967 return -EINVAL;
38968 }
38969
38970 if (skb_shinfo(skb)->nr_frags != 0) {
38971 printk("nicstar%d: No scatter-gather yet.\n", card->index);
38972- atomic_inc(&vcc->stats->tx_err);
38973+ atomic_inc_unchecked(&vcc->stats->tx_err);
38974 dev_kfree_skb_any(skb);
38975 return -EINVAL;
38976 }
38977@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38978 }
38979
38980 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
38981- atomic_inc(&vcc->stats->tx_err);
38982+ atomic_inc_unchecked(&vcc->stats->tx_err);
38983 dev_kfree_skb_any(skb);
38984 return -EIO;
38985 }
38986- atomic_inc(&vcc->stats->tx);
38987+ atomic_inc_unchecked(&vcc->stats->tx);
38988
38989 return 0;
38990 }
38991@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38992 printk
38993 ("nicstar%d: Can't allocate buffers for aal0.\n",
38994 card->index);
38995- atomic_add(i, &vcc->stats->rx_drop);
38996+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38997 break;
38998 }
38999 if (!atm_charge(vcc, sb->truesize)) {
39000 RXPRINTK
39001 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
39002 card->index);
39003- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
39004+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
39005 dev_kfree_skb_any(sb);
39006 break;
39007 }
39008@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39009 ATM_SKB(sb)->vcc = vcc;
39010 __net_timestamp(sb);
39011 vcc->push(vcc, sb);
39012- atomic_inc(&vcc->stats->rx);
39013+ atomic_inc_unchecked(&vcc->stats->rx);
39014 cell += ATM_CELL_PAYLOAD;
39015 }
39016
39017@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39018 if (iovb == NULL) {
39019 printk("nicstar%d: Out of iovec buffers.\n",
39020 card->index);
39021- atomic_inc(&vcc->stats->rx_drop);
39022+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39023 recycle_rx_buf(card, skb);
39024 return;
39025 }
39026@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39027 small or large buffer itself. */
39028 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
39029 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
39030- atomic_inc(&vcc->stats->rx_err);
39031+ atomic_inc_unchecked(&vcc->stats->rx_err);
39032 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39033 NS_MAX_IOVECS);
39034 NS_PRV_IOVCNT(iovb) = 0;
39035@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39036 ("nicstar%d: Expected a small buffer, and this is not one.\n",
39037 card->index);
39038 which_list(card, skb);
39039- atomic_inc(&vcc->stats->rx_err);
39040+ atomic_inc_unchecked(&vcc->stats->rx_err);
39041 recycle_rx_buf(card, skb);
39042 vc->rx_iov = NULL;
39043 recycle_iov_buf(card, iovb);
39044@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39045 ("nicstar%d: Expected a large buffer, and this is not one.\n",
39046 card->index);
39047 which_list(card, skb);
39048- atomic_inc(&vcc->stats->rx_err);
39049+ atomic_inc_unchecked(&vcc->stats->rx_err);
39050 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39051 NS_PRV_IOVCNT(iovb));
39052 vc->rx_iov = NULL;
39053@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39054 printk(" - PDU size mismatch.\n");
39055 else
39056 printk(".\n");
39057- atomic_inc(&vcc->stats->rx_err);
39058+ atomic_inc_unchecked(&vcc->stats->rx_err);
39059 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39060 NS_PRV_IOVCNT(iovb));
39061 vc->rx_iov = NULL;
39062@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39063 /* skb points to a small buffer */
39064 if (!atm_charge(vcc, skb->truesize)) {
39065 push_rxbufs(card, skb);
39066- atomic_inc(&vcc->stats->rx_drop);
39067+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39068 } else {
39069 skb_put(skb, len);
39070 dequeue_sm_buf(card, skb);
39071@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39072 ATM_SKB(skb)->vcc = vcc;
39073 __net_timestamp(skb);
39074 vcc->push(vcc, skb);
39075- atomic_inc(&vcc->stats->rx);
39076+ atomic_inc_unchecked(&vcc->stats->rx);
39077 }
39078 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
39079 struct sk_buff *sb;
39080@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39081 if (len <= NS_SMBUFSIZE) {
39082 if (!atm_charge(vcc, sb->truesize)) {
39083 push_rxbufs(card, sb);
39084- atomic_inc(&vcc->stats->rx_drop);
39085+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39086 } else {
39087 skb_put(sb, len);
39088 dequeue_sm_buf(card, sb);
39089@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39090 ATM_SKB(sb)->vcc = vcc;
39091 __net_timestamp(sb);
39092 vcc->push(vcc, sb);
39093- atomic_inc(&vcc->stats->rx);
39094+ atomic_inc_unchecked(&vcc->stats->rx);
39095 }
39096
39097 push_rxbufs(card, skb);
39098@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39099
39100 if (!atm_charge(vcc, skb->truesize)) {
39101 push_rxbufs(card, skb);
39102- atomic_inc(&vcc->stats->rx_drop);
39103+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39104 } else {
39105 dequeue_lg_buf(card, skb);
39106 #ifdef NS_USE_DESTRUCTORS
39107@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39108 ATM_SKB(skb)->vcc = vcc;
39109 __net_timestamp(skb);
39110 vcc->push(vcc, skb);
39111- atomic_inc(&vcc->stats->rx);
39112+ atomic_inc_unchecked(&vcc->stats->rx);
39113 }
39114
39115 push_rxbufs(card, sb);
39116@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39117 printk
39118 ("nicstar%d: Out of huge buffers.\n",
39119 card->index);
39120- atomic_inc(&vcc->stats->rx_drop);
39121+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39122 recycle_iovec_rx_bufs(card,
39123 (struct iovec *)
39124 iovb->data,
39125@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39126 card->hbpool.count++;
39127 } else
39128 dev_kfree_skb_any(hb);
39129- atomic_inc(&vcc->stats->rx_drop);
39130+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39131 } else {
39132 /* Copy the small buffer to the huge buffer */
39133 sb = (struct sk_buff *)iov->iov_base;
39134@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39135 #endif /* NS_USE_DESTRUCTORS */
39136 __net_timestamp(hb);
39137 vcc->push(vcc, hb);
39138- atomic_inc(&vcc->stats->rx);
39139+ atomic_inc_unchecked(&vcc->stats->rx);
39140 }
39141 }
39142
39143diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
39144index 943cf0d..37d15d5 100644
39145--- a/drivers/atm/solos-pci.c
39146+++ b/drivers/atm/solos-pci.c
39147@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
39148 }
39149 atm_charge(vcc, skb->truesize);
39150 vcc->push(vcc, skb);
39151- atomic_inc(&vcc->stats->rx);
39152+ atomic_inc_unchecked(&vcc->stats->rx);
39153 break;
39154
39155 case PKT_STATUS:
39156@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
39157 vcc = SKB_CB(oldskb)->vcc;
39158
39159 if (vcc) {
39160- atomic_inc(&vcc->stats->tx);
39161+ atomic_inc_unchecked(&vcc->stats->tx);
39162 solos_pop(vcc, oldskb);
39163 } else {
39164 dev_kfree_skb_irq(oldskb);
39165diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
39166index 0215934..ce9f5b1 100644
39167--- a/drivers/atm/suni.c
39168+++ b/drivers/atm/suni.c
39169@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
39170
39171
39172 #define ADD_LIMITED(s,v) \
39173- atomic_add((v),&stats->s); \
39174- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
39175+ atomic_add_unchecked((v),&stats->s); \
39176+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
39177
39178
39179 static void suni_hz(unsigned long from_timer)
39180diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
39181index 5120a96..e2572bd 100644
39182--- a/drivers/atm/uPD98402.c
39183+++ b/drivers/atm/uPD98402.c
39184@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
39185 struct sonet_stats tmp;
39186 int error = 0;
39187
39188- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39189+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39190 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
39191 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
39192 if (zero && !error) {
39193@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
39194
39195
39196 #define ADD_LIMITED(s,v) \
39197- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
39198- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
39199- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39200+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
39201+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
39202+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39203
39204
39205 static void stat_event(struct atm_dev *dev)
39206@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
39207 if (reason & uPD98402_INT_PFM) stat_event(dev);
39208 if (reason & uPD98402_INT_PCO) {
39209 (void) GET(PCOCR); /* clear interrupt cause */
39210- atomic_add(GET(HECCT),
39211+ atomic_add_unchecked(GET(HECCT),
39212 &PRIV(dev)->sonet_stats.uncorr_hcs);
39213 }
39214 if ((reason & uPD98402_INT_RFO) &&
39215@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
39216 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
39217 uPD98402_INT_LOS),PIMR); /* enable them */
39218 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
39219- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39220- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
39221- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
39222+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39223+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
39224+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
39225 return 0;
39226 }
39227
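
Both PHY drivers above (suni.c and uPD98402.c) funnel their SONET counters through an ADD_LIMITED macro that pins the value at INT_MAX once the signed total goes negative. Expanded with a plain int for illustration (kernel builds use -fno-strict-overflow, so the wrap is well defined there):

#include <limits.h>

/* Saturating add: after overflowing into negative territory the
 * counter is clamped to INT_MAX instead of continuing to wrap. */
static void add_limited(int *s, int v)
{
	*s += v;
	if (*s < 0)
		*s = INT_MAX;
}
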
39228diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
39229index 969c3c2..9b72956 100644
39230--- a/drivers/atm/zatm.c
39231+++ b/drivers/atm/zatm.c
39232@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39233 }
39234 if (!size) {
39235 dev_kfree_skb_irq(skb);
39236- if (vcc) atomic_inc(&vcc->stats->rx_err);
39237+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
39238 continue;
39239 }
39240 if (!atm_charge(vcc,skb->truesize)) {
39241@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39242 skb->len = size;
39243 ATM_SKB(skb)->vcc = vcc;
39244 vcc->push(vcc,skb);
39245- atomic_inc(&vcc->stats->rx);
39246+ atomic_inc_unchecked(&vcc->stats->rx);
39247 }
39248 zout(pos & 0xffff,MTA(mbx));
39249 #if 0 /* probably a stupid idea */
39250@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
39251 skb_queue_head(&zatm_vcc->backlog,skb);
39252 break;
39253 }
39254- atomic_inc(&vcc->stats->tx);
39255+ atomic_inc_unchecked(&vcc->stats->tx);
39256 wake_up(&zatm_vcc->tx_wait);
39257 }
39258
39259diff --git a/drivers/base/bus.c b/drivers/base/bus.c
39260index 83e910a..b224a73 100644
39261--- a/drivers/base/bus.c
39262+++ b/drivers/base/bus.c
39263@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
39264 return -EINVAL;
39265
39266 mutex_lock(&subsys->p->mutex);
39267- list_add_tail(&sif->node, &subsys->p->interfaces);
39268+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
39269 if (sif->add_dev) {
39270 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39271 while ((dev = subsys_dev_iter_next(&iter)))
39272@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
39273 subsys = sif->subsys;
39274
39275 mutex_lock(&subsys->p->mutex);
39276- list_del_init(&sif->node);
39277+ pax_list_del_init((struct list_head *)&sif->node);
39278 if (sif->remove_dev) {
39279 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39280 while ((dev = subsys_dev_iter_next(&iter)))
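
The bus.c hunks swap list_add_tail()/list_del_init() for pax_list_* variants. struct subsys_interface is one of the structures grsecurity's constify plugin moves into read-only memory, so its embedded list_head cannot be written directly; the pax_list helpers perform the same linkage inside a window where the protection is lifted. A rough sketch of the shape, assuming pax_open_kernel()/pax_close_kernel() toggle write access as elsewhere in this patch:

/* Sketch: list_add_tail() semantics, performed while otherwise
 * read-only kernel data is temporarily writable. */
static void pax_list_add_tail_sketch(struct list_head *new,
				     struct list_head *head)
{
	pax_open_kernel();	/* lift the read-only protection */
	__list_add(new, head->prev, head);
	pax_close_kernel();	/* restore it */
}
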
39281diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
39282index 25798db..15f130e 100644
39283--- a/drivers/base/devtmpfs.c
39284+++ b/drivers/base/devtmpfs.c
39285@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
39286 if (!thread)
39287 return 0;
39288
39289- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
39290+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
39291 if (err)
39292 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
39293 else
39294@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
39295 *err = sys_unshare(CLONE_NEWNS);
39296 if (*err)
39297 goto out;
39298- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
39299+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
39300 if (*err)
39301 goto out;
39302- sys_chdir("/.."); /* will traverse into overmounted root */
39303- sys_chroot(".");
39304+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
39305+ sys_chroot((char __force_user *)".");
39306 complete(&setup_done);
39307 while (1) {
39308 spin_lock(&req_lock);
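
devtmpfs invokes sys_mount(), sys_chdir() and sys_chroot() from a kernel thread, handing kernel pointers to entry points that expect __user ones. With PaX UDEREF the two address spaces are genuinely distinct, so the patch makes every such crossing explicit with __force_user. In grsecurity this is a thin sparse annotation, roughly:

/*
 * Sketch of the annotation: __force permits the address-space cast
 * under sparse, __user marks the destination space. Outside of a
 * sparse run both attributes compile away to nothing.
 */
#define __force_user	__force __user
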
39309diff --git a/drivers/base/node.c b/drivers/base/node.c
39310index 8f7ed99..700dd0c 100644
39311--- a/drivers/base/node.c
39312+++ b/drivers/base/node.c
39313@@ -624,7 +624,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
39314 struct node_attr {
39315 struct device_attribute attr;
39316 enum node_states state;
39317-};
39318+} __do_const;
39319
39320 static ssize_t show_node_state(struct device *dev,
39321 struct device_attribute *attr, char *buf)
39322diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
39323index eee55c1..b8c9393 100644
39324--- a/drivers/base/power/domain.c
39325+++ b/drivers/base/power/domain.c
39326@@ -1821,9 +1821,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
39327
39328 if (dev->power.subsys_data->domain_data) {
39329 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
39330- gpd_data->ops = (struct gpd_dev_ops){ NULL };
39331+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
39332 if (clear_td)
39333- gpd_data->td = (struct gpd_timing_data){ 0 };
39334+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
39335
39336 if (--gpd_data->refcount == 0) {
39337 dev->power.subsys_data->domain_data = NULL;
39338@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
39339 {
39340 struct cpuidle_driver *cpuidle_drv;
39341 struct gpd_cpu_data *cpu_data;
39342- struct cpuidle_state *idle_state;
39343+ cpuidle_state_no_const *idle_state;
39344 int ret = 0;
39345
39346 if (IS_ERR_OR_NULL(genpd) || state < 0)
39347@@ -1930,7 +1930,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
39348 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
39349 {
39350 struct gpd_cpu_data *cpu_data;
39351- struct cpuidle_state *idle_state;
39352+ cpuidle_state_no_const *idle_state;
39353 int ret = 0;
39354
39355 if (IS_ERR_OR_NULL(genpd))
39356diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
39357index 95b181d1..c4f0e19 100644
39358--- a/drivers/base/power/sysfs.c
39359+++ b/drivers/base/power/sysfs.c
39360@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
39361 return -EIO;
39362 }
39363 }
39364- return sprintf(buf, p);
39365+ return sprintf(buf, "%s", p);
39366 }
39367
39368 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
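
The sysfs one-liner is a classic format-string hardening fix: p names a status string chosen at runtime, and sprintf(buf, p) would parse any '%' inside it as a conversion specifier, reading garbage varargs. Passing it as data behind a fixed "%s" format is the safe form; the same fix recurs later in this patch (intel-rng.c, cdrom.c). A userspace illustration:

#include <stdio.h>

int main(void)
{
	const char *p = "100% active";	/* runtime-chosen string */
	char buf[64];

	/* sprintf(buf, p); would treat "% a" as a conversion: UB */
	sprintf(buf, "%s", p);		/* p is data, not a format */
	puts(buf);
	return 0;
}
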
39369diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
39370index eb1bd2e..2667d3a 100644
39371--- a/drivers/base/power/wakeup.c
39372+++ b/drivers/base/power/wakeup.c
39373@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
39374 * They need to be modified together atomically, so it's better to use one
39375 * atomic variable to hold them both.
39376 */
39377-static atomic_t combined_event_count = ATOMIC_INIT(0);
39378+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
39379
39380 #define IN_PROGRESS_BITS (sizeof(int) * 4)
39381 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
39382
39383 static void split_counters(unsigned int *cnt, unsigned int *inpr)
39384 {
39385- unsigned int comb = atomic_read(&combined_event_count);
39386+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
39387
39388 *cnt = (comb >> IN_PROGRESS_BITS);
39389 *inpr = comb & MAX_IN_PROGRESS;
39390@@ -401,7 +401,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
39391 ws->start_prevent_time = ws->last_time;
39392
39393 /* Increment the counter of events in progress. */
39394- cec = atomic_inc_return(&combined_event_count);
39395+ cec = atomic_inc_return_unchecked(&combined_event_count);
39396
39397 trace_wakeup_source_activate(ws->name, cec);
39398 }
39399@@ -527,7 +527,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
39400 * Increment the counter of registered wakeup events and decrement the
39401 * counter of wakeup events in progress simultaneously.
39402 */
39403- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
39404+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
39405 trace_wakeup_source_deactivate(ws->name, cec);
39406
39407 split_counters(&cnt, &inpr);
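
The wakeup.c comment above explains why cnt and inpr share one word: they must change together atomically. The encoding keeps the registered-event count in the high half and the in-progress count in the low IN_PROGRESS_BITS (sizeof(int) * 4, i.e. 16 on common builds), so wakeup_source_deactivate() does cnt++ and inpr-- with a single atomic add of MAX_IN_PROGRESS. The arithmetic, standalone:

#include <assert.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)		/* 16 */
#define MAX_IN_PROGRESS	 ((1 << IN_PROGRESS_BITS) - 1)	/* 0xffff */

int main(void)
{
	unsigned int comb = 0, cnt, inpr;

	comb += 1;			/* activate:   inpr++        */
	comb += MAX_IN_PROGRESS;	/* deactivate: cnt++, inpr-- */

	cnt  = comb >> IN_PROGRESS_BITS;
	inpr = comb & MAX_IN_PROGRESS;
	assert(cnt == 1 && inpr == 0);
	return 0;
}
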
39408diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
39409index dbb8350..4762f4c 100644
39410--- a/drivers/base/syscore.c
39411+++ b/drivers/base/syscore.c
39412@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
39413 void register_syscore_ops(struct syscore_ops *ops)
39414 {
39415 mutex_lock(&syscore_ops_lock);
39416- list_add_tail(&ops->node, &syscore_ops_list);
39417+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
39418 mutex_unlock(&syscore_ops_lock);
39419 }
39420 EXPORT_SYMBOL_GPL(register_syscore_ops);
39421@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
39422 void unregister_syscore_ops(struct syscore_ops *ops)
39423 {
39424 mutex_lock(&syscore_ops_lock);
39425- list_del(&ops->node);
39426+ pax_list_del((struct list_head *)&ops->node);
39427 mutex_unlock(&syscore_ops_lock);
39428 }
39429 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
39430diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
39431index 4595c22..d4f6c54 100644
39432--- a/drivers/block/cciss.c
39433+++ b/drivers/block/cciss.c
39434@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
39435 while (!list_empty(&h->reqQ)) {
39436 c = list_entry(h->reqQ.next, CommandList_struct, list);
39437 /* can't do anything if fifo is full */
39438- if ((h->access.fifo_full(h))) {
39439+ if ((h->access->fifo_full(h))) {
39440 dev_warn(&h->pdev->dev, "fifo full\n");
39441 break;
39442 }
39443@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
39444 h->Qdepth--;
39445
39446 /* Tell the controller execute command */
39447- h->access.submit_command(h, c);
39448+ h->access->submit_command(h, c);
39449
39450 /* Put job onto the completed Q */
39451 addQ(&h->cmpQ, c);
39452@@ -3447,17 +3447,17 @@ startio:
39453
39454 static inline unsigned long get_next_completion(ctlr_info_t *h)
39455 {
39456- return h->access.command_completed(h);
39457+ return h->access->command_completed(h);
39458 }
39459
39460 static inline int interrupt_pending(ctlr_info_t *h)
39461 {
39462- return h->access.intr_pending(h);
39463+ return h->access->intr_pending(h);
39464 }
39465
39466 static inline long interrupt_not_for_us(ctlr_info_t *h)
39467 {
39468- return ((h->access.intr_pending(h) == 0) ||
39469+ return ((h->access->intr_pending(h) == 0) ||
39470 (h->interrupts_enabled == 0));
39471 }
39472
39473@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
39474 u32 a;
39475
39476 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
39477- return h->access.command_completed(h);
39478+ return h->access->command_completed(h);
39479
39480 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
39481 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
39482@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
39483 trans_support & CFGTBL_Trans_use_short_tags);
39484
39485 /* Change the access methods to the performant access methods */
39486- h->access = SA5_performant_access;
39487+ h->access = &SA5_performant_access;
39488 h->transMethod = CFGTBL_Trans_Performant;
39489
39490 return;
39491@@ -4321,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
39492 if (prod_index < 0)
39493 return -ENODEV;
39494 h->product_name = products[prod_index].product_name;
39495- h->access = *(products[prod_index].access);
39496+ h->access = products[prod_index].access;
39497
39498 if (cciss_board_disabled(h)) {
39499 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
39500@@ -5053,7 +5053,7 @@ reinit_after_soft_reset:
39501 }
39502
39503 /* make sure the board interrupts are off */
39504- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39505+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39506 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
39507 if (rc)
39508 goto clean2;
39509@@ -5103,7 +5103,7 @@ reinit_after_soft_reset:
39510 * fake ones to scoop up any residual completions.
39511 */
39512 spin_lock_irqsave(&h->lock, flags);
39513- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39514+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39515 spin_unlock_irqrestore(&h->lock, flags);
39516 free_irq(h->intr[h->intr_mode], h);
39517 rc = cciss_request_irq(h, cciss_msix_discard_completions,
39518@@ -5123,9 +5123,9 @@ reinit_after_soft_reset:
39519 dev_info(&h->pdev->dev, "Board READY.\n");
39520 dev_info(&h->pdev->dev,
39521 "Waiting for stale completions to drain.\n");
39522- h->access.set_intr_mask(h, CCISS_INTR_ON);
39523+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39524 msleep(10000);
39525- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39526+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39527
39528 rc = controller_reset_failed(h->cfgtable);
39529 if (rc)
39530@@ -5148,7 +5148,7 @@ reinit_after_soft_reset:
39531 cciss_scsi_setup(h);
39532
39533 /* Turn the interrupts on so we can service requests */
39534- h->access.set_intr_mask(h, CCISS_INTR_ON);
39535+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39536
39537 /* Get the firmware version */
39538 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
39539@@ -5220,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
39540 kfree(flush_buf);
39541 if (return_code != IO_OK)
39542 dev_warn(&h->pdev->dev, "Error flushing cache\n");
39543- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39544+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39545 free_irq(h->intr[h->intr_mode], h);
39546 }
39547
39548diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
39549index 7fda30e..2f27946 100644
39550--- a/drivers/block/cciss.h
39551+++ b/drivers/block/cciss.h
39552@@ -101,7 +101,7 @@ struct ctlr_info
39553 /* information about each logical volume */
39554 drive_info_struct *drv[CISS_MAX_LUN];
39555
39556- struct access_method access;
39557+ struct access_method *access;
39558
39559 /* queue and queue Info */
39560 struct list_head reqQ;
39561@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
39562 }
39563
39564 static struct access_method SA5_access = {
39565- SA5_submit_command,
39566- SA5_intr_mask,
39567- SA5_fifo_full,
39568- SA5_intr_pending,
39569- SA5_completed,
39570+ .submit_command = SA5_submit_command,
39571+ .set_intr_mask = SA5_intr_mask,
39572+ .fifo_full = SA5_fifo_full,
39573+ .intr_pending = SA5_intr_pending,
39574+ .command_completed = SA5_completed,
39575 };
39576
39577 static struct access_method SA5B_access = {
39578- SA5_submit_command,
39579- SA5B_intr_mask,
39580- SA5_fifo_full,
39581- SA5B_intr_pending,
39582- SA5_completed,
39583+ .submit_command = SA5_submit_command,
39584+ .set_intr_mask = SA5B_intr_mask,
39585+ .fifo_full = SA5_fifo_full,
39586+ .intr_pending = SA5B_intr_pending,
39587+ .command_completed = SA5_completed,
39588 };
39589
39590 static struct access_method SA5_performant_access = {
39591- SA5_submit_command,
39592- SA5_performant_intr_mask,
39593- SA5_fifo_full,
39594- SA5_performant_intr_pending,
39595- SA5_performant_completed,
39596+ .submit_command = SA5_submit_command,
39597+ .set_intr_mask = SA5_performant_intr_mask,
39598+ .fifo_full = SA5_fifo_full,
39599+ .intr_pending = SA5_performant_intr_pending,
39600+ .command_completed = SA5_performant_completed,
39601 };
39602
39603 struct board_type {
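
Two related hardening moves run through the cciss changes. The access_method tables switch from positional to designated initializers, so each handler is bound to a named field, and ctlr_info stops embedding a writable copy of the table, keeping a pointer to the one shared instance instead; combined with constification, the dispatch pointers can then live in read-only memory. A compilable miniature of the pattern:

#include <stdio.h>

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h);
	int  (*intr_pending)(struct ctlr_info *h);
};

struct ctlr_info {
	const struct access_method *access;	/* pointer, not a copy */
};

static void sa5_submit(struct ctlr_info *h) { (void)h; puts("submit"); }
static int  sa5_pending(struct ctlr_info *h) { (void)h; return 0; }

/* Designated initializers name each slot, so the table stays
 * correct even if the struct layout changes. */
static const struct access_method sa5_access = {
	.submit_command	= sa5_submit,
	.intr_pending	= sa5_pending,
};

int main(void)
{
	struct ctlr_info h = { .access = &sa5_access };
	h.access->submit_command(&h);	/* was h.access.submit_command(h) */
	return 0;
}
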
39604diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
39605index 2b94403..fd6ad1f 100644
39606--- a/drivers/block/cpqarray.c
39607+++ b/drivers/block/cpqarray.c
39608@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39609 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
39610 goto Enomem4;
39611 }
39612- hba[i]->access.set_intr_mask(hba[i], 0);
39613+ hba[i]->access->set_intr_mask(hba[i], 0);
39614 if (request_irq(hba[i]->intr, do_ida_intr,
39615 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
39616 {
39617@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39618 add_timer(&hba[i]->timer);
39619
39620 /* Enable IRQ now that spinlock and rate limit timer are set up */
39621- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39622+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39623
39624 for(j=0; j<NWD; j++) {
39625 struct gendisk *disk = ida_gendisk[i][j];
39626@@ -694,7 +694,7 @@ DBGINFO(
39627 for(i=0; i<NR_PRODUCTS; i++) {
39628 if (board_id == products[i].board_id) {
39629 c->product_name = products[i].product_name;
39630- c->access = *(products[i].access);
39631+ c->access = products[i].access;
39632 break;
39633 }
39634 }
39635@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
39636 hba[ctlr]->intr = intr;
39637 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
39638 hba[ctlr]->product_name = products[j].product_name;
39639- hba[ctlr]->access = *(products[j].access);
39640+ hba[ctlr]->access = products[j].access;
39641 hba[ctlr]->ctlr = ctlr;
39642 hba[ctlr]->board_id = board_id;
39643 hba[ctlr]->pci_dev = NULL; /* not PCI */
39644@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
39645
39646 while((c = h->reqQ) != NULL) {
39647 /* Can't do anything if we're busy */
39648- if (h->access.fifo_full(h) == 0)
39649+ if (h->access->fifo_full(h) == 0)
39650 return;
39651
39652 /* Get the first entry from the request Q */
39653@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
39654 h->Qdepth--;
39655
39656 /* Tell the controller to do our bidding */
39657- h->access.submit_command(h, c);
39658+ h->access->submit_command(h, c);
39659
39660 /* Get onto the completion Q */
39661 addQ(&h->cmpQ, c);
39662@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39663 unsigned long flags;
39664 __u32 a,a1;
39665
39666- istat = h->access.intr_pending(h);
39667+ istat = h->access->intr_pending(h);
39668 /* Is this interrupt for us? */
39669 if (istat == 0)
39670 return IRQ_NONE;
39671@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39672 */
39673 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
39674 if (istat & FIFO_NOT_EMPTY) {
39675- while((a = h->access.command_completed(h))) {
39676+ while((a = h->access->command_completed(h))) {
39677 a1 = a; a &= ~3;
39678 if ((c = h->cmpQ) == NULL)
39679 {
39680@@ -1448,11 +1448,11 @@ static int sendcmd(
39681 /*
39682 * Disable interrupt
39683 */
39684- info_p->access.set_intr_mask(info_p, 0);
39685+ info_p->access->set_intr_mask(info_p, 0);
39686 /* Make sure there is room in the command FIFO */
39687 /* Actually it should be completely empty at this time. */
39688 for (i = 200000; i > 0; i--) {
39689- temp = info_p->access.fifo_full(info_p);
39690+ temp = info_p->access->fifo_full(info_p);
39691 if (temp != 0) {
39692 break;
39693 }
39694@@ -1465,7 +1465,7 @@ DBG(
39695 /*
39696 * Send the cmd
39697 */
39698- info_p->access.submit_command(info_p, c);
39699+ info_p->access->submit_command(info_p, c);
39700 complete = pollcomplete(ctlr);
39701
39702 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
39703@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
39704 * we check the new geometry. Then turn interrupts back on when
39705 * we're done.
39706 */
39707- host->access.set_intr_mask(host, 0);
39708+ host->access->set_intr_mask(host, 0);
39709 getgeometry(ctlr);
39710- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
39711+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
39712
39713 for(i=0; i<NWD; i++) {
39714 struct gendisk *disk = ida_gendisk[ctlr][i];
39715@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
39716 /* Wait (up to 2 seconds) for a command to complete */
39717
39718 for (i = 200000; i > 0; i--) {
39719- done = hba[ctlr]->access.command_completed(hba[ctlr]);
39720+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
39721 if (done == 0) {
39722 udelay(10); /* a short fixed delay */
39723 } else
39724diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
39725index be73e9d..7fbf140 100644
39726--- a/drivers/block/cpqarray.h
39727+++ b/drivers/block/cpqarray.h
39728@@ -99,7 +99,7 @@ struct ctlr_info {
39729 drv_info_t drv[NWD];
39730 struct proc_dir_entry *proc;
39731
39732- struct access_method access;
39733+ struct access_method *access;
39734
39735 cmdlist_t *reqQ;
39736 cmdlist_t *cmpQ;
39737diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
39738index a76ceb3..3c1a9fd 100644
39739--- a/drivers/block/drbd/drbd_int.h
39740+++ b/drivers/block/drbd/drbd_int.h
39741@@ -331,7 +331,7 @@ struct drbd_epoch {
39742 struct drbd_connection *connection;
39743 struct list_head list;
39744 unsigned int barrier_nr;
39745- atomic_t epoch_size; /* increased on every request added. */
39746+ atomic_unchecked_t epoch_size; /* increased on every request added. */
39747 atomic_t active; /* increased on every req. added, and dec on every finished. */
39748 unsigned long flags;
39749 };
39750@@ -797,7 +797,7 @@ struct drbd_device {
39751 unsigned int al_tr_number;
39752 int al_tr_cycle;
39753 wait_queue_head_t seq_wait;
39754- atomic_t packet_seq;
39755+ atomic_unchecked_t packet_seq;
39756 unsigned int peer_seq;
39757 spinlock_t peer_seq_lock;
39758 unsigned int minor;
39759@@ -1407,7 +1407,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
39760 char __user *uoptval;
39761 int err;
39762
39763- uoptval = (char __user __force *)optval;
39764+ uoptval = (char __force_user *)optval;
39765
39766 set_fs(KERNEL_DS);
39767 if (level == SOL_SOCKET)
39768diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
39769index 89c497c..9c736ae 100644
39770--- a/drivers/block/drbd/drbd_interval.c
39771+++ b/drivers/block/drbd/drbd_interval.c
39772@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
39773 }
39774
39775 static const struct rb_augment_callbacks augment_callbacks = {
39776- augment_propagate,
39777- augment_copy,
39778- augment_rotate,
39779+ .propagate = augment_propagate,
39780+ .copy = augment_copy,
39781+ .rotate = augment_rotate,
39782 };
39783
39784 /**
39785diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
39786index 960645c..6c2724a 100644
39787--- a/drivers/block/drbd/drbd_main.c
39788+++ b/drivers/block/drbd/drbd_main.c
39789@@ -1322,7 +1322,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
39790 p->sector = sector;
39791 p->block_id = block_id;
39792 p->blksize = blksize;
39793- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
39794+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
39795 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
39796 }
39797
39798@@ -1628,7 +1628,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
39799 return -EIO;
39800 p->sector = cpu_to_be64(req->i.sector);
39801 p->block_id = (unsigned long)req;
39802- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
39803+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
39804 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
39805 if (device->state.conn >= C_SYNC_SOURCE &&
39806 device->state.conn <= C_PAUSED_SYNC_T)
39807@@ -2670,8 +2670,8 @@ void drbd_destroy_connection(struct kref *kref)
39808 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
39809 struct drbd_resource *resource = connection->resource;
39810
39811- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
39812- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
39813+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
39814+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
39815 kfree(connection->current_epoch);
39816
39817 idr_destroy(&connection->peer_devices);
39818diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
39819index 3f2e167..d3170e4 100644
39820--- a/drivers/block/drbd/drbd_nl.c
39821+++ b/drivers/block/drbd/drbd_nl.c
39822@@ -3616,7 +3616,7 @@ finish:
39823
39824 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39825 {
39826- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39827+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39828 struct sk_buff *msg;
39829 struct drbd_genlmsghdr *d_out;
39830 unsigned seq;
39831@@ -3629,7 +3629,7 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39832 return;
39833 }
39834
39835- seq = atomic_inc_return(&drbd_genl_seq);
39836+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
39837 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
39838 if (!msg)
39839 goto failed;
39840diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
39841index 5b17ec8..6c21e6b 100644
39842--- a/drivers/block/drbd/drbd_receiver.c
39843+++ b/drivers/block/drbd/drbd_receiver.c
39844@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
39845 struct drbd_device *device = peer_device->device;
39846 int err;
39847
39848- atomic_set(&device->packet_seq, 0);
39849+ atomic_set_unchecked(&device->packet_seq, 0);
39850 device->peer_seq = 0;
39851
39852 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
39853@@ -1199,7 +1199,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39854 do {
39855 next_epoch = NULL;
39856
39857- epoch_size = atomic_read(&epoch->epoch_size);
39858+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
39859
39860 switch (ev & ~EV_CLEANUP) {
39861 case EV_PUT:
39862@@ -1239,7 +1239,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39863 rv = FE_DESTROYED;
39864 } else {
39865 epoch->flags = 0;
39866- atomic_set(&epoch->epoch_size, 0);
39867+ atomic_set_unchecked(&epoch->epoch_size, 0);
39868 /* atomic_set(&epoch->active, 0); is already zero */
39869 if (rv == FE_STILL_LIVE)
39870 rv = FE_RECYCLED;
39871@@ -1490,7 +1490,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39872 conn_wait_active_ee_empty(connection);
39873 drbd_flush(connection);
39874
39875- if (atomic_read(&connection->current_epoch->epoch_size)) {
39876+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39877 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
39878 if (epoch)
39879 break;
39880@@ -1503,11 +1503,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39881 }
39882
39883 epoch->flags = 0;
39884- atomic_set(&epoch->epoch_size, 0);
39885+ atomic_set_unchecked(&epoch->epoch_size, 0);
39886 atomic_set(&epoch->active, 0);
39887
39888 spin_lock(&connection->epoch_lock);
39889- if (atomic_read(&connection->current_epoch->epoch_size)) {
39890+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39891 list_add(&epoch->list, &connection->current_epoch->list);
39892 connection->current_epoch = epoch;
39893 connection->epochs++;
39894@@ -2224,7 +2224,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39895
39896 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
39897 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39898- atomic_inc(&connection->current_epoch->epoch_size);
39899+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
39900 err2 = drbd_drain_block(peer_device, pi->size);
39901 if (!err)
39902 err = err2;
39903@@ -2266,7 +2266,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39904
39905 spin_lock(&connection->epoch_lock);
39906 peer_req->epoch = connection->current_epoch;
39907- atomic_inc(&peer_req->epoch->epoch_size);
39908+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
39909 atomic_inc(&peer_req->epoch->active);
39910 spin_unlock(&connection->epoch_lock);
39911
39912@@ -4461,7 +4461,7 @@ struct data_cmd {
39913 int expect_payload;
39914 size_t pkt_size;
39915 int (*fn)(struct drbd_connection *, struct packet_info *);
39916-};
39917+} __do_const;
39918
39919 static struct data_cmd drbd_cmd_handler[] = {
39920 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
39921@@ -4572,7 +4572,7 @@ static void conn_disconnect(struct drbd_connection *connection)
39922 if (!list_empty(&connection->current_epoch->list))
39923 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
39924 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
39925- atomic_set(&connection->current_epoch->epoch_size, 0);
39926+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
39927 connection->send.seen_any_write_yet = false;
39928
39929 drbd_info(connection, "Connection closed\n");
39930@@ -5364,7 +5364,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
39931 struct asender_cmd {
39932 size_t pkt_size;
39933 int (*fn)(struct drbd_connection *connection, struct packet_info *);
39934-};
39935+} __do_const;
39936
39937 static struct asender_cmd asender_tbl[] = {
39938 [P_PING] = { 0, got_Ping },
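
The drbd command tables (data_cmd, asender_cmd) pick up __do_const, the hook grsecurity's constify GCC plugin keys on: a structure so marked is forced const and its instances are placed in read-only memory, removing function-pointer dispatch tables like these as kernel write targets. Roughly:

/* Sketch: recognized by the constify plugin; empty otherwise. */
#ifdef CONSTIFY_PLUGIN
# define __do_const	__attribute__((do_const))
#else
# define __do_const
#endif

struct data_cmd {
	int	expect_payload;
	size_t	pkt_size;
	int   (*fn)(struct drbd_connection *, struct packet_info *);
} __do_const;	/* every instance ends up in .rodata */
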
39939diff --git a/drivers/block/loop.c b/drivers/block/loop.c
39940index 6cb1beb..bf490f7 100644
39941--- a/drivers/block/loop.c
39942+++ b/drivers/block/loop.c
39943@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
39944
39945 file_start_write(file);
39946 set_fs(get_ds());
39947- bw = file->f_op->write(file, buf, len, &pos);
39948+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
39949 set_fs(old_fs);
39950 file_end_write(file);
39951 if (likely(bw == len))
39952diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
39953index 02351e2..a9ea617 100644
39954--- a/drivers/block/nvme-core.c
39955+++ b/drivers/block/nvme-core.c
39956@@ -73,7 +73,6 @@ static LIST_HEAD(dev_list);
39957 static struct task_struct *nvme_thread;
39958 static struct workqueue_struct *nvme_workq;
39959 static wait_queue_head_t nvme_kthread_wait;
39960-static struct notifier_block nvme_nb;
39961
39962 static void nvme_reset_failed_dev(struct work_struct *ws);
39963
39964@@ -2925,6 +2924,10 @@ static struct pci_driver nvme_driver = {
39965 .err_handler = &nvme_err_handler,
39966 };
39967
39968+static struct notifier_block nvme_nb = {
39969+ .notifier_call = &nvme_cpu_notify,
39970+};
39971+
39972 static int __init nvme_init(void)
39973 {
39974 int result;
39975@@ -2941,7 +2944,6 @@ static int __init nvme_init(void)
39976 else if (result > 0)
39977 nvme_major = result;
39978
39979- nvme_nb.notifier_call = &nvme_cpu_notify;
39980 result = register_hotcpu_notifier(&nvme_nb);
39981 if (result)
39982 goto unregister_blkdev;
39983diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
39984index 758ac44..58087fd 100644
39985--- a/drivers/block/pktcdvd.c
39986+++ b/drivers/block/pktcdvd.c
39987@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
39988
39989 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
39990 {
39991- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
39992+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
39993 }
39994
39995 /*
39996@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
39997 return -EROFS;
39998 }
39999 pd->settings.fp = ti.fp;
40000- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
40001+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
40002
40003 if (ti.nwa_v) {
40004 pd->nwa = be32_to_cpu(ti.next_writable);
40005diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
40006index e5565fb..71be10b4 100644
40007--- a/drivers/block/smart1,2.h
40008+++ b/drivers/block/smart1,2.h
40009@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
40010 }
40011
40012 static struct access_method smart4_access = {
40013- smart4_submit_command,
40014- smart4_intr_mask,
40015- smart4_fifo_full,
40016- smart4_intr_pending,
40017- smart4_completed,
40018+ .submit_command = smart4_submit_command,
40019+ .set_intr_mask = smart4_intr_mask,
40020+ .fifo_full = smart4_fifo_full,
40021+ .intr_pending = smart4_intr_pending,
40022+ .command_completed = smart4_completed,
40023 };
40024
40025 /*
40026@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
40027 }
40028
40029 static struct access_method smart2_access = {
40030- smart2_submit_command,
40031- smart2_intr_mask,
40032- smart2_fifo_full,
40033- smart2_intr_pending,
40034- smart2_completed,
40035+ .submit_command = smart2_submit_command,
40036+ .set_intr_mask = smart2_intr_mask,
40037+ .fifo_full = smart2_fifo_full,
40038+ .intr_pending = smart2_intr_pending,
40039+ .command_completed = smart2_completed,
40040 };
40041
40042 /*
40043@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
40044 }
40045
40046 static struct access_method smart2e_access = {
40047- smart2e_submit_command,
40048- smart2e_intr_mask,
40049- smart2e_fifo_full,
40050- smart2e_intr_pending,
40051- smart2e_completed,
40052+ .submit_command = smart2e_submit_command,
40053+ .set_intr_mask = smart2e_intr_mask,
40054+ .fifo_full = smart2e_fifo_full,
40055+ .intr_pending = smart2e_intr_pending,
40056+ .command_completed = smart2e_completed,
40057 };
40058
40059 /*
40060@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
40061 }
40062
40063 static struct access_method smart1_access = {
40064- smart1_submit_command,
40065- smart1_intr_mask,
40066- smart1_fifo_full,
40067- smart1_intr_pending,
40068- smart1_completed,
40069+ .submit_command = smart1_submit_command,
40070+ .set_intr_mask = smart1_intr_mask,
40071+ .fifo_full = smart1_fifo_full,
40072+ .intr_pending = smart1_intr_pending,
40073+ .command_completed = smart1_completed,
40074 };
40075diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
40076index f038dba..bb74c08 100644
40077--- a/drivers/bluetooth/btwilink.c
40078+++ b/drivers/bluetooth/btwilink.c
40079@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
40080
40081 static int bt_ti_probe(struct platform_device *pdev)
40082 {
40083- static struct ti_st *hst;
40084+ struct ti_st *hst;
40085 struct hci_dev *hdev;
40086 int err;
40087
40088diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
40089index 898b84b..86f74b9 100644
40090--- a/drivers/cdrom/cdrom.c
40091+++ b/drivers/cdrom/cdrom.c
40092@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
40093 ENSURE(reset, CDC_RESET);
40094 ENSURE(generic_packet, CDC_GENERIC_PACKET);
40095 cdi->mc_flags = 0;
40096- cdo->n_minors = 0;
40097 cdi->options = CDO_USE_FFLAGS;
40098
40099 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
40100@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
40101 else
40102 cdi->cdda_method = CDDA_OLD;
40103
40104- if (!cdo->generic_packet)
40105- cdo->generic_packet = cdrom_dummy_generic_packet;
40106+ if (!cdo->generic_packet) {
40107+ pax_open_kernel();
40108+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
40109+ pax_close_kernel();
40110+ }
40111
40112 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
40113 mutex_lock(&cdrom_mutex);
40114@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
40115 if (cdi->exit)
40116 cdi->exit(cdi);
40117
40118- cdi->ops->n_minors--;
40119 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
40120 }
40121
40122@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
40123 */
40124 nr = nframes;
40125 do {
40126- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40127+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40128 if (cgc.buffer)
40129 break;
40130
40131@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
40132 struct cdrom_device_info *cdi;
40133 int ret;
40134
40135- ret = scnprintf(info + *pos, max_size - *pos, header);
40136+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
40137 if (!ret)
40138 return 1;
40139
40140diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
40141index 584bc31..e64a12c 100644
40142--- a/drivers/cdrom/gdrom.c
40143+++ b/drivers/cdrom/gdrom.c
40144@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
40145 .audio_ioctl = gdrom_audio_ioctl,
40146 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
40147 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
40148- .n_minors = 1,
40149 };
40150
40151 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
40152diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
40153index 6e9f74a..50c7cea 100644
40154--- a/drivers/char/Kconfig
40155+++ b/drivers/char/Kconfig
40156@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
40157
40158 config DEVKMEM
40159 bool "/dev/kmem virtual device support"
40160- default y
40161+ default n
40162+ depends on !GRKERNSEC_KMEM
40163 help
40164 Say Y here if you want to support the /dev/kmem device. The
40165 /dev/kmem device is rarely used, but can be used for certain
40166@@ -577,6 +578,7 @@ config DEVPORT
40167 bool
40168 depends on !M68K
40169 depends on ISA || PCI
40170+ depends on !GRKERNSEC_KMEM
40171 default y
40172
40173 source "drivers/s390/char/Kconfig"
40174diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
40175index a48e05b..6bac831 100644
40176--- a/drivers/char/agp/compat_ioctl.c
40177+++ b/drivers/char/agp/compat_ioctl.c
40178@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
40179 return -ENOMEM;
40180 }
40181
40182- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
40183+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
40184 sizeof(*usegment) * ureserve.seg_count)) {
40185 kfree(usegment);
40186 kfree(ksegment);
40187diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
40188index b297033..fa217ca 100644
40189--- a/drivers/char/agp/frontend.c
40190+++ b/drivers/char/agp/frontend.c
40191@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40192 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
40193 return -EFAULT;
40194
40195- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
40196+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
40197 return -EFAULT;
40198
40199 client = agp_find_client_by_pid(reserve.pid);
40200@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40201 if (segment == NULL)
40202 return -ENOMEM;
40203
40204- if (copy_from_user(segment, (void __user *) reserve.seg_list,
40205+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
40206 sizeof(struct agp_segment) * reserve.seg_count)) {
40207 kfree(segment);
40208 return -EFAULT;
40209diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
40210index 4f94375..413694e 100644
40211--- a/drivers/char/genrtc.c
40212+++ b/drivers/char/genrtc.c
40213@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
40214 switch (cmd) {
40215
40216 case RTC_PLL_GET:
40217+ memset(&pll, 0, sizeof(pll));
40218 if (get_rtc_pll(&pll))
40219 return -EINVAL;
40220 else
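
The genrtc change is a kernel-stack infoleak fix: pll lives on the stack, and if get_rtc_pll() fails to write every field (or the struct has padding), the subsequent copy to userspace would carry whatever bytes the stack held before. Zeroing the struct up front makes the copied bytes deterministic. The hazard in miniature, with memcpy() standing in for copy_to_user():

#include <string.h>

struct pll_info {
	int  ctrl;
	char flag;	/* 3 padding bytes follow on most ABIs */
	int  value;
};

static void pll_get(struct pll_info *out)
{
	struct pll_info pll;

	memset(&pll, 0, sizeof(pll));	/* the fix: no stale stack bytes */
	pll.ctrl = 1;			/* fill only what is known */
	memcpy(out, &pll, sizeof(pll));	/* stand-in for copy_to_user() */
}
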
40221diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
40222index d5d4cd8..22d561d 100644
40223--- a/drivers/char/hpet.c
40224+++ b/drivers/char/hpet.c
40225@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
40226 }
40227
40228 static int
40229-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
40230+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
40231 struct hpet_info *info)
40232 {
40233 struct hpet_timer __iomem *timer;
40234diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
40235index 86fe45c..c0ea948 100644
40236--- a/drivers/char/hw_random/intel-rng.c
40237+++ b/drivers/char/hw_random/intel-rng.c
40238@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
40239
40240 if (no_fwh_detect)
40241 return -ENODEV;
40242- printk(warning);
40243+ printk("%s", warning);
40244 return -EBUSY;
40245 }
40246
40247diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
40248index e6db938..835e3a2 100644
40249--- a/drivers/char/ipmi/ipmi_msghandler.c
40250+++ b/drivers/char/ipmi/ipmi_msghandler.c
40251@@ -438,7 +438,7 @@ struct ipmi_smi {
40252 struct proc_dir_entry *proc_dir;
40253 char proc_dir_name[10];
40254
40255- atomic_t stats[IPMI_NUM_STATS];
40256+ atomic_unchecked_t stats[IPMI_NUM_STATS];
40257
40258 /*
40259 * run_to_completion duplicate of smb_info, smi_info
40260@@ -470,9 +470,9 @@ static LIST_HEAD(smi_watchers);
40261 static DEFINE_MUTEX(smi_watchers_mutex);
40262
40263 #define ipmi_inc_stat(intf, stat) \
40264- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
40265+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
40266 #define ipmi_get_stat(intf, stat) \
40267- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
40268+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
40269
40270 static int is_lan_addr(struct ipmi_addr *addr)
40271 {
40272@@ -2926,7 +2926,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
40273 INIT_LIST_HEAD(&intf->cmd_rcvrs);
40274 init_waitqueue_head(&intf->waitq);
40275 for (i = 0; i < IPMI_NUM_STATS; i++)
40276- atomic_set(&intf->stats[i], 0);
40277+ atomic_set_unchecked(&intf->stats[i], 0);
40278
40279 intf->proc_dir = NULL;
40280
40281diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
40282index 5d66568..c9d93c3 100644
40283--- a/drivers/char/ipmi/ipmi_si_intf.c
40284+++ b/drivers/char/ipmi/ipmi_si_intf.c
40285@@ -285,7 +285,7 @@ struct smi_info {
40286 unsigned char slave_addr;
40287
40288 /* Counters and things for the proc filesystem. */
40289- atomic_t stats[SI_NUM_STATS];
40290+ atomic_unchecked_t stats[SI_NUM_STATS];
40291
40292 struct task_struct *thread;
40293
40294@@ -294,9 +294,9 @@ struct smi_info {
40295 };
40296
40297 #define smi_inc_stat(smi, stat) \
40298- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
40299+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
40300 #define smi_get_stat(smi, stat) \
40301- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
40302+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
40303
40304 #define SI_MAX_PARMS 4
40305
40306@@ -3374,7 +3374,7 @@ static int try_smi_init(struct smi_info *new_smi)
40307 atomic_set(&new_smi->req_events, 0);
40308 new_smi->run_to_completion = false;
40309 for (i = 0; i < SI_NUM_STATS; i++)
40310- atomic_set(&new_smi->stats[i], 0);
40311+ atomic_set_unchecked(&new_smi->stats[i], 0);
40312
40313 new_smi->interrupt_disabled = true;
40314 atomic_set(&new_smi->stop_operation, 0);
40315diff --git a/drivers/char/mem.c b/drivers/char/mem.c
40316index 917403f..dddd899 100644
40317--- a/drivers/char/mem.c
40318+++ b/drivers/char/mem.c
40319@@ -18,6 +18,7 @@
40320 #include <linux/raw.h>
40321 #include <linux/tty.h>
40322 #include <linux/capability.h>
40323+#include <linux/security.h>
40324 #include <linux/ptrace.h>
40325 #include <linux/device.h>
40326 #include <linux/highmem.h>
40327@@ -36,6 +37,10 @@
40328
40329 #define DEVPORT_MINOR 4
40330
40331+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40332+extern const struct file_operations grsec_fops;
40333+#endif
40334+
40335 static inline unsigned long size_inside_page(unsigned long start,
40336 unsigned long size)
40337 {
40338@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40339
40340 while (cursor < to) {
40341 if (!devmem_is_allowed(pfn)) {
40342+#ifdef CONFIG_GRKERNSEC_KMEM
40343+ gr_handle_mem_readwrite(from, to);
40344+#else
40345 printk(KERN_INFO
40346 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
40347 current->comm, from, to);
40348+#endif
40349 return 0;
40350 }
40351 cursor += PAGE_SIZE;
40352@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40353 }
40354 return 1;
40355 }
40356+#elif defined(CONFIG_GRKERNSEC_KMEM)
40357+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40358+{
40359+ return 0;
40360+}
40361 #else
40362 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40363 {
40364@@ -122,6 +136,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40365
40366 while (count > 0) {
40367 unsigned long remaining;
40368+ char *temp;
40369
40370 sz = size_inside_page(p, count);
40371
40372@@ -137,7 +152,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40373 if (!ptr)
40374 return -EFAULT;
40375
40376- remaining = copy_to_user(buf, ptr, sz);
40377+#ifdef CONFIG_PAX_USERCOPY
40378+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40379+ if (!temp) {
40380+ unxlate_dev_mem_ptr(p, ptr);
40381+ return -ENOMEM;
40382+ }
40383+ memcpy(temp, ptr, sz);
40384+#else
40385+ temp = ptr;
40386+#endif
40387+
40388+ remaining = copy_to_user(buf, temp, sz);
40389+
40390+#ifdef CONFIG_PAX_USERCOPY
40391+ kfree(temp);
40392+#endif
40393+
40394 unxlate_dev_mem_ptr(p, ptr);
40395 if (remaining)
40396 return -EFAULT;
40397@@ -369,9 +400,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40398 size_t count, loff_t *ppos)
40399 {
40400 unsigned long p = *ppos;
40401- ssize_t low_count, read, sz;
40402+ ssize_t low_count, read, sz, err = 0;
40403 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
40404- int err = 0;
40405
40406 read = 0;
40407 if (p < (unsigned long) high_memory) {
40408@@ -393,6 +423,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40409 }
40410 #endif
40411 while (low_count > 0) {
40412+ char *temp;
40413+
40414 sz = size_inside_page(p, low_count);
40415
40416 /*
40417@@ -402,7 +434,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40418 */
40419 kbuf = xlate_dev_kmem_ptr((char *)p);
40420
40421- if (copy_to_user(buf, kbuf, sz))
40422+#ifdef CONFIG_PAX_USERCOPY
40423+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40424+ if (!temp)
40425+ return -ENOMEM;
40426+ memcpy(temp, kbuf, sz);
40427+#else
40428+ temp = kbuf;
40429+#endif
40430+
40431+ err = copy_to_user(buf, temp, sz);
40432+
40433+#ifdef CONFIG_PAX_USERCOPY
40434+ kfree(temp);
40435+#endif
40436+
40437+ if (err)
40438 return -EFAULT;
40439 buf += sz;
40440 p += sz;
40441@@ -827,6 +874,9 @@ static const struct memdev {
40442 #ifdef CONFIG_PRINTK
40443 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
40444 #endif
40445+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40446+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
40447+#endif
40448 };
40449
40450 static int memory_open(struct inode *inode, struct file *filp)
40451@@ -898,7 +948,7 @@ static int __init chr_dev_init(void)
40452 continue;
40453
40454 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
40455- NULL, devlist[minor].name);
40456+ NULL, "%s", devlist[minor].name);
40457 }
40458
40459 return tty_init();
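
The read_mem()/read_kmem() hunks above show the PAX_USERCOPY staging pattern: rather than copying straight from an arbitrary kernel or I/O mapping to userspace, the bytes are first placed in a slab object allocated with GFP_USERCOPY, so the usercopy checker can verify the copy stays within one whitelisted heap object. Condensed into a hypothetical helper (copy_chunk_to_user is not part of the patch):

    /* Hypothetical helper condensing the staging done inline above. */
    static ssize_t copy_chunk_to_user(char __user *buf, const void *src, size_t sz)
    {
        unsigned long remaining;
    #ifdef CONFIG_PAX_USERCOPY
        char *temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);

        if (!temp)
            return -ENOMEM;
        memcpy(temp, src, sz);          /* stage in a checked slab object */
        remaining = copy_to_user(buf, temp, sz);
        kfree(temp);
    #else
        remaining = copy_to_user(buf, src, sz);
    #endif
        return remaining ? -EFAULT : 0;
    }
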
40460diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
40461index 9df78e2..01ba9ae 100644
40462--- a/drivers/char/nvram.c
40463+++ b/drivers/char/nvram.c
40464@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
40465
40466 spin_unlock_irq(&rtc_lock);
40467
40468- if (copy_to_user(buf, contents, tmp - contents))
40469+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
40470 return -EFAULT;
40471
40472 *ppos = i;
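
The nvram fix clamps the computed length tmp - contents against sizeof(contents) before copy_to_user, so a miscomputed cursor becomes a clean -EFAULT instead of an over-read past the local buffer; extract_entropy_user() in random.c below gains the identical i > sizeof(tmp) guard. The general shape (bounded_copy_to_user is an illustrative helper, not in the patch):

    /* Illustrative: never pass a derived length to copy_to_user unclamped. */
    static ssize_t bounded_copy_to_user(char __user *ubuf, const void *buf,
                                        size_t buf_size, size_t len)
    {
        if (len > buf_size)     /* derived length exceeds backing buffer */
            return -EFAULT;
        if (copy_to_user(ubuf, buf, len))
            return -EFAULT;
        return len;
    }
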
40473diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
40474index 8320abd..ec48108 100644
40475--- a/drivers/char/pcmcia/synclink_cs.c
40476+++ b/drivers/char/pcmcia/synclink_cs.c
40477@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40478
40479 if (debug_level >= DEBUG_LEVEL_INFO)
40480 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
40481- __FILE__, __LINE__, info->device_name, port->count);
40482+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
40483
40484- WARN_ON(!port->count);
40485+ WARN_ON(!atomic_read(&port->count));
40486
40487 if (tty_port_close_start(port, tty, filp) == 0)
40488 goto cleanup;
40489@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40490 cleanup:
40491 if (debug_level >= DEBUG_LEVEL_INFO)
40492 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
40493- tty->driver->name, port->count);
40494+ tty->driver->name, atomic_read(&port->count));
40495 }
40496
40497 /* Wait until the transmitter is empty.
40498@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40499
40500 if (debug_level >= DEBUG_LEVEL_INFO)
40501 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
40502- __FILE__, __LINE__, tty->driver->name, port->count);
40503+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
40504
40505 /* If port is closing, signal caller to try again */
40506 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
40507@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40508 goto cleanup;
40509 }
40510 spin_lock(&port->lock);
40511- port->count++;
40512+ atomic_inc(&port->count);
40513 spin_unlock(&port->lock);
40514 spin_unlock_irqrestore(&info->netlock, flags);
40515
40516- if (port->count == 1) {
40517+ if (atomic_read(&port->count) == 1) {
40518 /* 1st open on this device, init hardware */
40519 retval = startup(info, tty);
40520 if (retval < 0)
40521@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40522 unsigned short new_crctype;
40523
40524 /* return error if TTY interface open */
40525- if (info->port.count)
40526+ if (atomic_read(&info->port.count))
40527 return -EBUSY;
40528
40529 switch (encoding)
40530@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
40531
40532 /* arbitrate between network and tty opens */
40533 spin_lock_irqsave(&info->netlock, flags);
40534- if (info->port.count != 0 || info->netcount != 0) {
40535+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40536 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40537 spin_unlock_irqrestore(&info->netlock, flags);
40538 return -EBUSY;
40539@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40540 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
40541
40542 /* return error if TTY interface open */
40543- if (info->port.count)
40544+ if (atomic_read(&info->port.count))
40545 return -EBUSY;
40546
40547 if (cmd != SIOCWANDEV)
40548diff --git a/drivers/char/random.c b/drivers/char/random.c
40549index 71529e1..822b036 100644
40550--- a/drivers/char/random.c
40551+++ b/drivers/char/random.c
40552@@ -284,9 +284,6 @@
40553 /*
40554 * To allow fractional bits to be tracked, the entropy_count field is
40555 * denominated in units of 1/8th bits.
40556- *
40557- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
40558- * credit_entropy_bits() needs to be 64 bits wide.
40559 */
40560 #define ENTROPY_SHIFT 3
40561 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
40562@@ -433,9 +430,9 @@ struct entropy_store {
40563 };
40564
40565 static void push_to_pool(struct work_struct *work);
40566-static __u32 input_pool_data[INPUT_POOL_WORDS];
40567-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
40568-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
40569+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
40570+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40571+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40572
40573 static struct entropy_store input_pool = {
40574 .poolinfo = &poolinfo_table[0],
40575@@ -524,8 +521,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
40576 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
40577 }
40578
40579- ACCESS_ONCE(r->input_rotate) = input_rotate;
40580- ACCESS_ONCE(r->add_ptr) = i;
40581+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
40582+ ACCESS_ONCE_RW(r->add_ptr) = i;
40583 smp_wmb();
40584
40585 if (out)
40586@@ -632,7 +629,7 @@ retry:
40587 /* The +2 corresponds to the /4 in the denominator */
40588
40589 do {
40590- unsigned int anfrac = min(pnfrac, pool_size/2);
40591+ u64 anfrac = min(pnfrac, pool_size/2);
40592 unsigned int add =
40593 ((pool_size - entropy_count)*anfrac*3) >> s;
40594
40595@@ -1177,7 +1174,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
40596
40597 extract_buf(r, tmp);
40598 i = min_t(int, nbytes, EXTRACT_SIZE);
40599- if (copy_to_user(buf, tmp, i)) {
40600+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
40601 ret = -EFAULT;
40602 break;
40603 }
40604@@ -1567,7 +1564,7 @@ static char sysctl_bootid[16];
40605 static int proc_do_uuid(struct ctl_table *table, int write,
40606 void __user *buffer, size_t *lenp, loff_t *ppos)
40607 {
40608- struct ctl_table fake_table;
40609+ ctl_table_no_const fake_table;
40610 unsigned char buf[64], tmp_uuid[16], *uuid;
40611
40612 uuid = table->data;
40613@@ -1597,7 +1594,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
40614 static int proc_do_entropy(struct ctl_table *table, int write,
40615 void __user *buffer, size_t *lenp, loff_t *ppos)
40616 {
40617- struct ctl_table fake_table;
40618+ ctl_table_no_const fake_table;
40619 int entropy_count;
40620
40621 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
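
Two things happen in the random.c hunks: the pool arrays gain __latent_entropy, which marks them for the latent_entropy plugin to pre-fill with build-time-derived entropy, and anfrac in credit_entropy_bits() is widened to u64 so the (pool_size - entropy_count) * anfrac * 3 product is computed in 64 bits. That is why the deleted comment's constraint, which capped pool sizes so a 32-bit multiply could not wrap, is no longer needed. A compilable demonstration of the promotion rule (values are illustrative, not the kernel's):

    /* One u64 operand promotes the whole multiply to 64 bits. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t a = 1u << 17, b = 3u << 16;     /* product needs ~34 bits  */
        uint32_t wrapped = a * b;                /* 32-bit multiply wraps to 0 */
        uint64_t exact   = (uint64_t)a * b;      /* widened, no wrap        */

        printf("32-bit: %u\n64-bit: %llu\n", wrapped, (unsigned long long)exact);
        return 0;
    }
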
40622diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
40623index 7cc1fe22..b602d6b 100644
40624--- a/drivers/char/sonypi.c
40625+++ b/drivers/char/sonypi.c
40626@@ -54,6 +54,7 @@
40627
40628 #include <asm/uaccess.h>
40629 #include <asm/io.h>
40630+#include <asm/local.h>
40631
40632 #include <linux/sonypi.h>
40633
40634@@ -490,7 +491,7 @@ static struct sonypi_device {
40635 spinlock_t fifo_lock;
40636 wait_queue_head_t fifo_proc_list;
40637 struct fasync_struct *fifo_async;
40638- int open_count;
40639+ local_t open_count;
40640 int model;
40641 struct input_dev *input_jog_dev;
40642 struct input_dev *input_key_dev;
40643@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
40644 static int sonypi_misc_release(struct inode *inode, struct file *file)
40645 {
40646 mutex_lock(&sonypi_device.lock);
40647- sonypi_device.open_count--;
40648+ local_dec(&sonypi_device.open_count);
40649 mutex_unlock(&sonypi_device.lock);
40650 return 0;
40651 }
40652@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
40653 {
40654 mutex_lock(&sonypi_device.lock);
40655 /* Flush input queue on first open */
40656- if (!sonypi_device.open_count)
40657+ if (!local_read(&sonypi_device.open_count))
40658 kfifo_reset(&sonypi_device.fifo);
40659- sonypi_device.open_count++;
40660+ local_inc(&sonypi_device.open_count);
40661 mutex_unlock(&sonypi_device.lock);
40662
40663 return 0;
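
sonypi's open_count becomes a local_t (the patch adds the asm/local.h include above), the same conversion later applied to drm's dev->open_count. local_t gives the counter atomic update semantics instead of a plain int read-modify-write, and the first-open/last-close decisions pair naturally with local_inc_return()/local_dec_and_test(), as the drm hunks use. Minimal sketch:

    /* Sketch: first-open / last-close accounting on a local_t. */
    #include <asm/local.h>

    static local_t open_count = LOCAL_INIT(0);

    static int example_open(void)
    {
        if (local_inc_return(&open_count) == 1) {
            /* first opener: reset FIFOs, init hardware */
        }
        return 0;
    }

    static void example_release(void)
    {
        if (local_dec_and_test(&open_count)) {
            /* last closer: quiesce hardware */
        }
    }
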
40664diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
40665index 565a947..dcdc06e 100644
40666--- a/drivers/char/tpm/tpm_acpi.c
40667+++ b/drivers/char/tpm/tpm_acpi.c
40668@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
40669 virt = acpi_os_map_iomem(start, len);
40670 if (!virt) {
40671 kfree(log->bios_event_log);
40672+ log->bios_event_log = NULL;
40673 printk("%s: ERROR - Unable to map memory\n", __func__);
40674 return -EIO;
40675 }
40676
40677- memcpy_fromio(log->bios_event_log, virt, len);
40678+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
40679
40680 acpi_os_unmap_iomem(virt, len);
40681 return 0;
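
Besides the __force_kernel cast on the iomem source, the error path above now clears log->bios_event_log right after freeing it, so a later teardown path cannot double-free or dereference the stale pointer (kfree(NULL) is a no-op, so a second free becomes harmless). The discipline generalizes; kfree_and_null below is a hypothetical helper, not part of this patch:

    /* Hypothetical helper: free and poison in one step. */
    #define kfree_and_null(p) do { kfree(p); (p) = NULL; } while (0)

    static int example_error_path(struct tpm_bios_log *log)
    {
        kfree_and_null(log->bios_event_log);  /* safe to free again later */
        return -EIO;
    }
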
40682diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
40683index 59f7cb2..bac8b6d 100644
40684--- a/drivers/char/tpm/tpm_eventlog.c
40685+++ b/drivers/char/tpm/tpm_eventlog.c
40686@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
40687 event = addr;
40688
40689 if ((event->event_type == 0 && event->event_size == 0) ||
40690- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
40691+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
40692 return NULL;
40693
40694 return addr;
40695@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
40696 return NULL;
40697
40698 if ((event->event_type == 0 && event->event_size == 0) ||
40699- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
40700+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
40701 return NULL;
40702
40703 (*pos)++;
40704@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
40705 int i;
40706
40707 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
40708- seq_putc(m, data[i]);
40709+ if (!seq_putc(m, data[i]))
40710+ return -EFAULT;
40711
40712 return 0;
40713 }
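
The event-log bounds checks are rewritten from addr + sizeof(struct tcpa_event) + event->event_size >= limit into event->event_size >= limit - addr - sizeof(struct tcpa_event). event_size is parsed from firmware-provided data; adding a huge value to addr can wrap around the address space, making the sum compare as small and the check pass. Comparing the untrusted size against the remaining room cannot wrap. Demonstration with well-defined unsigned integers (wrapping pointer arithmetic would be UB; values are illustrative and the header size is omitted for brevity):

    /* Why "base + untrusted >= limit" fails and the rewritten form holds. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t base    = UINTPTR_MAX - 4095;  /* near the top of memory */
        uintptr_t limit   = base + 256;          /* end of the mapped log  */
        uintptr_t evil_sz = 8192;                /* untrusted length field */

        /* base + evil_sz wraps to a small value, so >= limit is false
         * and the bogus record is accepted: */
        printf("broken check rejects? %d\n", (int)(base + evil_sz >= limit));
        /* remaining-room form rejects it: */
        printf("fixed  check rejects? %d\n", (int)(evil_sz >= limit - base));
        return 0;
    }
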
40714diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
40715index 60aafb8..10c08e0 100644
40716--- a/drivers/char/virtio_console.c
40717+++ b/drivers/char/virtio_console.c
40718@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
40719 if (to_user) {
40720 ssize_t ret;
40721
40722- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
40723+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
40724 if (ret)
40725 return -EFAULT;
40726 } else {
40727@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
40728 if (!port_has_data(port) && !port->host_connected)
40729 return 0;
40730
40731- return fill_readbuf(port, ubuf, count, true);
40732+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
40733 }
40734
40735 static int wait_port_writable(struct port *port, bool nonblock)
40736diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
40737index 57a078e..c17cde8 100644
40738--- a/drivers/clk/clk-composite.c
40739+++ b/drivers/clk/clk-composite.c
40740@@ -146,7 +146,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
40741 struct clk *clk;
40742 struct clk_init_data init;
40743 struct clk_composite *composite;
40744- struct clk_ops *clk_composite_ops;
40745+ clk_ops_no_const *clk_composite_ops;
40746
40747 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
40748 if (!composite) {
40749diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
40750index dd3a78c..386d49c 100644
40751--- a/drivers/clk/socfpga/clk-gate.c
40752+++ b/drivers/clk/socfpga/clk-gate.c
40753@@ -22,6 +22,7 @@
40754 #include <linux/mfd/syscon.h>
40755 #include <linux/of.h>
40756 #include <linux/regmap.h>
40757+#include <asm/pgtable.h>
40758
40759 #include "clk.h"
40760
40761@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
40762 return 0;
40763 }
40764
40765-static struct clk_ops gateclk_ops = {
40766+static clk_ops_no_const gateclk_ops __read_only = {
40767 .prepare = socfpga_clk_prepare,
40768 .recalc_rate = socfpga_clk_recalc_rate,
40769 .get_parent = socfpga_clk_get_parent,
40770@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
40771 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
40772 socfpga_clk->hw.bit_idx = clk_gate[1];
40773
40774- gateclk_ops.enable = clk_gate_ops.enable;
40775- gateclk_ops.disable = clk_gate_ops.disable;
40776+ pax_open_kernel();
40777+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
40778+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
40779+ pax_close_kernel();
40780 }
40781
40782 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
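
This is the patch's standard recipe for ops tables that need one late fixup, applied again to clk-pll.c just below: the object is made __read_only, its type switched to a *_no_const variant so the constify plugin's implicit const on function-pointer members does not forbid the one legitimate write, and that write is bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (e.g. by toggling CR0.WP on x86). The *(void **)& casts perform the store without the compiler seeing an assignment to a const-qualified field. Sketch, with my_* names as illustrative placeholders:

    /* The __read_only + pax_open_kernel write idiom used throughout. */
    static clk_ops_no_const my_gate_ops __read_only = {
        .prepare = my_prepare,
    };

    static void __init my_late_fixup(void)
    {
        pax_open_kernel();                           /* allow the write  */
        *(void **)&my_gate_ops.enable  = clk_gate_ops.enable;
        *(void **)&my_gate_ops.disable = clk_gate_ops.disable;
        pax_close_kernel();                          /* read-only again  */
    }
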
40783diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
40784index de6da95..a2e72c0 100644
40785--- a/drivers/clk/socfpga/clk-pll.c
40786+++ b/drivers/clk/socfpga/clk-pll.c
40787@@ -21,6 +21,7 @@
40788 #include <linux/io.h>
40789 #include <linux/of.h>
40790 #include <linux/of_address.h>
40791+#include <asm/pgtable.h>
40792
40793 #include "clk.h"
40794
40795@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
40796 CLK_MGR_PLL_CLK_SRC_MASK;
40797 }
40798
40799-static struct clk_ops clk_pll_ops = {
40800+static struct clk_ops_no_const clk_pll_ops __read_only = {
40801 .recalc_rate = clk_pll_recalc_rate,
40802 .get_parent = clk_pll_get_parent,
40803 };
40804@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
40805 pll_clk->hw.hw.init = &init;
40806
40807 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
40808- clk_pll_ops.enable = clk_gate_ops.enable;
40809- clk_pll_ops.disable = clk_gate_ops.disable;
40810+ pax_open_kernel();
40811+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
40812+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
40813+ pax_close_kernel();
40814
40815 clk = clk_register(NULL, &pll_clk->hw.hw);
40816 if (WARN_ON(IS_ERR(clk))) {
40817diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
40818index b0c18ed..1713a80 100644
40819--- a/drivers/cpufreq/acpi-cpufreq.c
40820+++ b/drivers/cpufreq/acpi-cpufreq.c
40821@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40822 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
40823 per_cpu(acfreq_data, cpu) = data;
40824
40825- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
40826- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40827+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
40828+ pax_open_kernel();
40829+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40830+ pax_close_kernel();
40831+ }
40832
40833 result = acpi_processor_register_performance(data->acpi_data, cpu);
40834 if (result)
40835@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40836 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
40837 break;
40838 case ACPI_ADR_SPACE_FIXED_HARDWARE:
40839- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40840+ pax_open_kernel();
40841+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40842+ pax_close_kernel();
40843 break;
40844 default:
40845 break;
40846@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
40847 if (!msrs)
40848 return;
40849
40850- acpi_cpufreq_driver.boost_supported = true;
40851- acpi_cpufreq_driver.boost_enabled = boost_state(0);
40852+ pax_open_kernel();
40853+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
40854+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
40855+ pax_close_kernel();
40856
40857 cpu_notifier_register_begin();
40858
40859diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
40860index 6f02485..13684ae 100644
40861--- a/drivers/cpufreq/cpufreq.c
40862+++ b/drivers/cpufreq/cpufreq.c
40863@@ -2100,7 +2100,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
40864 }
40865
40866 mutex_lock(&cpufreq_governor_mutex);
40867- list_del(&governor->governor_list);
40868+ pax_list_del(&governor->governor_list);
40869 mutex_unlock(&cpufreq_governor_mutex);
40870 return;
40871 }
40872@@ -2316,7 +2316,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
40873 return NOTIFY_OK;
40874 }
40875
40876-static struct notifier_block __refdata cpufreq_cpu_notifier = {
40877+static struct notifier_block cpufreq_cpu_notifier = {
40878 .notifier_call = cpufreq_cpu_callback,
40879 };
40880
40881@@ -2356,13 +2356,17 @@ int cpufreq_boost_trigger_state(int state)
40882 return 0;
40883
40884 write_lock_irqsave(&cpufreq_driver_lock, flags);
40885- cpufreq_driver->boost_enabled = state;
40886+ pax_open_kernel();
40887+ *(bool *)&cpufreq_driver->boost_enabled = state;
40888+ pax_close_kernel();
40889 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40890
40891 ret = cpufreq_driver->set_boost(state);
40892 if (ret) {
40893 write_lock_irqsave(&cpufreq_driver_lock, flags);
40894- cpufreq_driver->boost_enabled = !state;
40895+ pax_open_kernel();
40896+ *(bool *)&cpufreq_driver->boost_enabled = !state;
40897+ pax_close_kernel();
40898 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40899
40900 pr_err("%s: Cannot %s BOOST\n",
40901@@ -2419,8 +2423,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40902
40903 pr_debug("trying to register driver %s\n", driver_data->name);
40904
40905- if (driver_data->setpolicy)
40906- driver_data->flags |= CPUFREQ_CONST_LOOPS;
40907+ if (driver_data->setpolicy) {
40908+ pax_open_kernel();
40909+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
40910+ pax_close_kernel();
40911+ }
40912
40913 write_lock_irqsave(&cpufreq_driver_lock, flags);
40914 if (cpufreq_driver) {
40915@@ -2435,8 +2442,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40916 * Check if driver provides function to enable boost -
40917 * if not, use cpufreq_boost_set_sw as default
40918 */
40919- if (!cpufreq_driver->set_boost)
40920- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40921+ if (!cpufreq_driver->set_boost) {
40922+ pax_open_kernel();
40923+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40924+ pax_close_kernel();
40925+ }
40926
40927 ret = cpufreq_sysfs_create_file(&boost.attr);
40928 if (ret) {
40929diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
40930index 1b44496..b80ff5e 100644
40931--- a/drivers/cpufreq/cpufreq_governor.c
40932+++ b/drivers/cpufreq/cpufreq_governor.c
40933@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40934 struct dbs_data *dbs_data;
40935 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
40936 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
40937- struct od_ops *od_ops = NULL;
40938+ const struct od_ops *od_ops = NULL;
40939 struct od_dbs_tuners *od_tuners = NULL;
40940 struct cs_dbs_tuners *cs_tuners = NULL;
40941 struct cpu_dbs_common_info *cpu_cdbs;
40942@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40943
40944 if ((cdata->governor == GOV_CONSERVATIVE) &&
40945 (!policy->governor->initialized)) {
40946- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40947+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40948
40949 cpufreq_register_notifier(cs_ops->notifier_block,
40950 CPUFREQ_TRANSITION_NOTIFIER);
40951@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40952
40953 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
40954 (policy->governor->initialized == 1)) {
40955- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40956+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40957
40958 cpufreq_unregister_notifier(cs_ops->notifier_block,
40959 CPUFREQ_TRANSITION_NOTIFIER);
40960diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
40961index cc401d1..8197340 100644
40962--- a/drivers/cpufreq/cpufreq_governor.h
40963+++ b/drivers/cpufreq/cpufreq_governor.h
40964@@ -212,7 +212,7 @@ struct common_dbs_data {
40965 void (*exit)(struct dbs_data *dbs_data);
40966
40967 /* Governor specific ops, see below */
40968- void *gov_ops;
40969+ const void *gov_ops;
40970 };
40971
40972 /* Governor Per policy data */
40973@@ -232,7 +232,7 @@ struct od_ops {
40974 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
40975 unsigned int freq_next, unsigned int relation);
40976 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
40977-};
40978+} __no_const;
40979
40980 struct cs_ops {
40981 struct notifier_block *notifier_block;
40982diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
40983index 18d4091..434be15 100644
40984--- a/drivers/cpufreq/cpufreq_ondemand.c
40985+++ b/drivers/cpufreq/cpufreq_ondemand.c
40986@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
40987
40988 define_get_cpu_dbs_routines(od_cpu_dbs_info);
40989
40990-static struct od_ops od_ops = {
40991+static struct od_ops od_ops __read_only = {
40992 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
40993 .powersave_bias_target = generic_powersave_bias_target,
40994 .freq_increase = dbs_freq_increase,
40995@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
40996 (struct cpufreq_policy *, unsigned int, unsigned int),
40997 unsigned int powersave_bias)
40998 {
40999- od_ops.powersave_bias_target = f;
41000+ pax_open_kernel();
41001+ *(void **)&od_ops.powersave_bias_target = f;
41002+ pax_close_kernel();
41003 od_set_powersave_bias(powersave_bias);
41004 }
41005 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
41006
41007 void od_unregister_powersave_bias_handler(void)
41008 {
41009- od_ops.powersave_bias_target = generic_powersave_bias_target;
41010+ pax_open_kernel();
41011+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
41012+ pax_close_kernel();
41013 od_set_powersave_bias(0);
41014 }
41015 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
41016diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
41017index 86631cb..c34ec78 100644
41018--- a/drivers/cpufreq/intel_pstate.c
41019+++ b/drivers/cpufreq/intel_pstate.c
41020@@ -121,10 +121,10 @@ struct pstate_funcs {
41021 struct cpu_defaults {
41022 struct pstate_adjust_policy pid_policy;
41023 struct pstate_funcs funcs;
41024-};
41025+} __do_const;
41026
41027 static struct pstate_adjust_policy pid_params;
41028-static struct pstate_funcs pstate_funcs;
41029+static struct pstate_funcs *pstate_funcs;
41030
41031 struct perf_limits {
41032 int no_turbo;
41033@@ -526,7 +526,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
41034
41035 cpu->pstate.current_pstate = pstate;
41036
41037- pstate_funcs.set(cpu, pstate);
41038+ pstate_funcs->set(cpu, pstate);
41039 }
41040
41041 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
41042@@ -546,12 +546,12 @@ static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
41043
41044 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
41045 {
41046- cpu->pstate.min_pstate = pstate_funcs.get_min();
41047- cpu->pstate.max_pstate = pstate_funcs.get_max();
41048- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
41049+ cpu->pstate.min_pstate = pstate_funcs->get_min();
41050+ cpu->pstate.max_pstate = pstate_funcs->get_max();
41051+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
41052
41053- if (pstate_funcs.get_vid)
41054- pstate_funcs.get_vid(cpu);
41055+ if (pstate_funcs->get_vid)
41056+ pstate_funcs->get_vid(cpu);
41057 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
41058 }
41059
41060@@ -838,9 +838,9 @@ static int intel_pstate_msrs_not_valid(void)
41061 rdmsrl(MSR_IA32_APERF, aperf);
41062 rdmsrl(MSR_IA32_MPERF, mperf);
41063
41064- if (!pstate_funcs.get_max() ||
41065- !pstate_funcs.get_min() ||
41066- !pstate_funcs.get_turbo())
41067+ if (!pstate_funcs->get_max() ||
41068+ !pstate_funcs->get_min() ||
41069+ !pstate_funcs->get_turbo())
41070 return -ENODEV;
41071
41072 rdmsrl(MSR_IA32_APERF, tmp);
41073@@ -854,7 +854,7 @@ static int intel_pstate_msrs_not_valid(void)
41074 return 0;
41075 }
41076
41077-static void copy_pid_params(struct pstate_adjust_policy *policy)
41078+static void copy_pid_params(const struct pstate_adjust_policy *policy)
41079 {
41080 pid_params.sample_rate_ms = policy->sample_rate_ms;
41081 pid_params.p_gain_pct = policy->p_gain_pct;
41082@@ -866,11 +866,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
41083
41084 static void copy_cpu_funcs(struct pstate_funcs *funcs)
41085 {
41086- pstate_funcs.get_max = funcs->get_max;
41087- pstate_funcs.get_min = funcs->get_min;
41088- pstate_funcs.get_turbo = funcs->get_turbo;
41089- pstate_funcs.set = funcs->set;
41090- pstate_funcs.get_vid = funcs->get_vid;
41091+ pstate_funcs = funcs;
41092 }
41093
41094 #if IS_ENABLED(CONFIG_ACPI)
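
Instead of copying five callbacks into a writable static struct pstate_funcs, the patch keeps one pointer to the CPU-specific table, which, with __do_const on struct cpu_defaults, can stay in read-only memory. The resident writable attack surface shrinks from five function pointers to a single data pointer, and copy_cpu_funcs() collapses to an assignment:

    /* Before: a writable field-by-field copy of the ops table.
     *   static struct pstate_funcs pstate_funcs;
     *   pstate_funcs.get_max = funcs->get_max;  ...and so on. */
    /* After: one pointer to a table that can stay read-only.  */
    static struct pstate_funcs *pstate_funcs;

    static void copy_cpu_funcs(struct pstate_funcs *funcs)
    {
        pstate_funcs = funcs;       /* callers: pstate_funcs->get_max() */
    }
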
41095diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
41096index 529cfd9..0e28fff 100644
41097--- a/drivers/cpufreq/p4-clockmod.c
41098+++ b/drivers/cpufreq/p4-clockmod.c
41099@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41100 case 0x0F: /* Core Duo */
41101 case 0x16: /* Celeron Core */
41102 case 0x1C: /* Atom */
41103- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41104+ pax_open_kernel();
41105+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41106+ pax_close_kernel();
41107 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
41108 case 0x0D: /* Pentium M (Dothan) */
41109- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41110+ pax_open_kernel();
41111+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41112+ pax_close_kernel();
41113 /* fall through */
41114 case 0x09: /* Pentium M (Banias) */
41115 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
41116@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41117
41118 /* on P-4s, the TSC runs with constant frequency independent whether
41119 * throttling is active or not. */
41120- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41121+ pax_open_kernel();
41122+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41123+ pax_close_kernel();
41124
41125 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
41126 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
41127diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
41128index 9bb42ba..b01b4a2 100644
41129--- a/drivers/cpufreq/sparc-us3-cpufreq.c
41130+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
41131@@ -18,14 +18,12 @@
41132 #include <asm/head.h>
41133 #include <asm/timer.h>
41134
41135-static struct cpufreq_driver *cpufreq_us3_driver;
41136-
41137 struct us3_freq_percpu_info {
41138 struct cpufreq_frequency_table table[4];
41139 };
41140
41141 /* Indexed by cpu number. */
41142-static struct us3_freq_percpu_info *us3_freq_table;
41143+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
41144
41145 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
41146 * in the Safari config register.
41147@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
41148
41149 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
41150 {
41151- if (cpufreq_us3_driver)
41152- us3_freq_target(policy, 0);
41153+ us3_freq_target(policy, 0);
41154
41155 return 0;
41156 }
41157
41158+static int __init us3_freq_init(void);
41159+static void __exit us3_freq_exit(void);
41160+
41161+static struct cpufreq_driver cpufreq_us3_driver = {
41162+ .init = us3_freq_cpu_init,
41163+ .verify = cpufreq_generic_frequency_table_verify,
41164+ .target_index = us3_freq_target,
41165+ .get = us3_freq_get,
41166+ .exit = us3_freq_cpu_exit,
41167+ .name = "UltraSPARC-III",
41168+
41169+};
41170+
41171 static int __init us3_freq_init(void)
41172 {
41173 unsigned long manuf, impl, ver;
41174- int ret;
41175
41176 if (tlb_type != cheetah && tlb_type != cheetah_plus)
41177 return -ENODEV;
41178@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
41179 (impl == CHEETAH_IMPL ||
41180 impl == CHEETAH_PLUS_IMPL ||
41181 impl == JAGUAR_IMPL ||
41182- impl == PANTHER_IMPL)) {
41183- struct cpufreq_driver *driver;
41184-
41185- ret = -ENOMEM;
41186- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
41187- if (!driver)
41188- goto err_out;
41189-
41190- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
41191- GFP_KERNEL);
41192- if (!us3_freq_table)
41193- goto err_out;
41194-
41195- driver->init = us3_freq_cpu_init;
41196- driver->verify = cpufreq_generic_frequency_table_verify;
41197- driver->target_index = us3_freq_target;
41198- driver->get = us3_freq_get;
41199- driver->exit = us3_freq_cpu_exit;
41200- strcpy(driver->name, "UltraSPARC-III");
41201-
41202- cpufreq_us3_driver = driver;
41203- ret = cpufreq_register_driver(driver);
41204- if (ret)
41205- goto err_out;
41206-
41207- return 0;
41208-
41209-err_out:
41210- if (driver) {
41211- kfree(driver);
41212- cpufreq_us3_driver = NULL;
41213- }
41214- kfree(us3_freq_table);
41215- us3_freq_table = NULL;
41216- return ret;
41217- }
41218+ impl == PANTHER_IMPL))
41219+ return cpufreq_register_driver(&cpufreq_us3_driver);
41220
41221 return -ENODEV;
41222 }
41223
41224 static void __exit us3_freq_exit(void)
41225 {
41226- if (cpufreq_us3_driver) {
41227- cpufreq_unregister_driver(cpufreq_us3_driver);
41228- kfree(cpufreq_us3_driver);
41229- cpufreq_us3_driver = NULL;
41230- kfree(us3_freq_table);
41231- us3_freq_table = NULL;
41232- }
41233+ cpufreq_unregister_driver(&cpufreq_us3_driver);
41234 }
41235
41236 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
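
The UltraSPARC-III driver is rebuilt around a statically defined cpufreq_driver and a fixed us3_freq_table[NR_CPUS], eliminating the kzalloc()s, the partial-failure cleanup labels, and, most relevant to this patchset, a heap-resident writable copy of a function-pointer table. Generic shape of the static-registration pattern (example_* names are illustrative):

    /* Static registration: one failure point, nothing to free. */
    static struct cpufreq_driver example_driver = {
        .init   = example_cpu_init,
        .verify = cpufreq_generic_frequency_table_verify,
        .name   = "example",
    };

    static int __init example_init(void)
    {
        return cpufreq_register_driver(&example_driver);
    }

    static void __exit example_exit(void)
    {
        cpufreq_unregister_driver(&example_driver);
    }
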
41237diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
41238index 7d4a315..21bb886 100644
41239--- a/drivers/cpufreq/speedstep-centrino.c
41240+++ b/drivers/cpufreq/speedstep-centrino.c
41241@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
41242 !cpu_has(cpu, X86_FEATURE_EST))
41243 return -ENODEV;
41244
41245- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
41246- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41247+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
41248+ pax_open_kernel();
41249+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41250+ pax_close_kernel();
41251+ }
41252
41253 if (policy->cpu != 0)
41254 return -ENODEV;
41255diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
41256index 9634f20..e1499c7 100644
41257--- a/drivers/cpuidle/driver.c
41258+++ b/drivers/cpuidle/driver.c
41259@@ -205,7 +205,7 @@ static int poll_idle(struct cpuidle_device *dev,
41260
41261 static void poll_idle_init(struct cpuidle_driver *drv)
41262 {
41263- struct cpuidle_state *state = &drv->states[0];
41264+ cpuidle_state_no_const *state = &drv->states[0];
41265
41266 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
41267 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
41268diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
41269index ca89412..a7b9c49 100644
41270--- a/drivers/cpuidle/governor.c
41271+++ b/drivers/cpuidle/governor.c
41272@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
41273 mutex_lock(&cpuidle_lock);
41274 if (__cpuidle_find_governor(gov->name) == NULL) {
41275 ret = 0;
41276- list_add_tail(&gov->governor_list, &cpuidle_governors);
41277+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
41278 if (!cpuidle_curr_governor ||
41279 cpuidle_curr_governor->rating < gov->rating)
41280 cpuidle_switch_governor(gov);
41281diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
41282index efe2f17..b8124f9 100644
41283--- a/drivers/cpuidle/sysfs.c
41284+++ b/drivers/cpuidle/sysfs.c
41285@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
41286 NULL
41287 };
41288
41289-static struct attribute_group cpuidle_attr_group = {
41290+static attribute_group_no_const cpuidle_attr_group = {
41291 .attrs = cpuidle_default_attrs,
41292 .name = "cpuidle",
41293 };
41294diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
41295index 12fea3e..1e28f47 100644
41296--- a/drivers/crypto/hifn_795x.c
41297+++ b/drivers/crypto/hifn_795x.c
41298@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
41299 MODULE_PARM_DESC(hifn_pll_ref,
41300 "PLL reference clock (pci[freq] or ext[freq], default ext)");
41301
41302-static atomic_t hifn_dev_number;
41303+static atomic_unchecked_t hifn_dev_number;
41304
41305 #define ACRYPTO_OP_DECRYPT 0
41306 #define ACRYPTO_OP_ENCRYPT 1
41307@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
41308 goto err_out_disable_pci_device;
41309
41310 snprintf(name, sizeof(name), "hifn%d",
41311- atomic_inc_return(&hifn_dev_number)-1);
41312+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
41313
41314 err = pci_request_regions(pdev, name);
41315 if (err)
41316diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
41317index 9f90369..bfcacdb 100644
41318--- a/drivers/devfreq/devfreq.c
41319+++ b/drivers/devfreq/devfreq.c
41320@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
41321 goto err_out;
41322 }
41323
41324- list_add(&governor->node, &devfreq_governor_list);
41325+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
41326
41327 list_for_each_entry(devfreq, &devfreq_list, node) {
41328 int ret = 0;
41329@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
41330 }
41331 }
41332
41333- list_del(&governor->node);
41334+ pax_list_del((struct list_head *)&governor->node);
41335 err_out:
41336 mutex_unlock(&devfreq_list_lock);
41337
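
list_add()/list_del() on the governor list become pax_list_add()/pax_list_del(), as they do for cpufreq and cpuidle governors above. As I read their use in this patchset, the pax_list_* helpers perform the node-pointer writes under pax_open_kernel(), so list nodes embedded in constified (read-only) objects can still be linked and unlinked; the (struct list_head *) cast strips the const from &governor->node. Sketch assuming that behavior:

    /* Sketch: maintaining a list whose nodes live in read-only objects. */
    static LIST_HEAD(example_governor_list);

    static void example_register(const struct devfreq_governor *gov)
    {
        /* plain list_add() would fault writing gov->node.{next,prev} */
        pax_list_add((struct list_head *)&gov->node, &example_governor_list);
    }

    static void example_unregister(const struct devfreq_governor *gov)
    {
        pax_list_del((struct list_head *)&gov->node);
    }
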
41338diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
41339index b35007e..55ad549 100644
41340--- a/drivers/dma/sh/shdma-base.c
41341+++ b/drivers/dma/sh/shdma-base.c
41342@@ -267,8 +267,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
41343 schan->slave_id = -EINVAL;
41344 }
41345
41346- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
41347- sdev->desc_size, GFP_KERNEL);
41348+ schan->desc = kcalloc(sdev->desc_size,
41349+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
41350 if (!schan->desc) {
41351 ret = -ENOMEM;
41352 goto edescalloc;
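
kcalloc(n, size) rejects allocations whose n * size product would overflow, and multiplication commutes, so swapping the two arguments requests the identical region; the reordering here presumably serves the patchset's size_overflow instrumentation, which tracks the size expression in a particular argument slot (an assumption on my part, not something the hunk states). Either call is overflow-checked:

    /* Both forms request NR_DESCS_PER_CHANNEL * desc_size bytes and
     * both return NULL if the product would overflow. */
    schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, sdev->desc_size, GFP_KERNEL);
    schan->desc = kcalloc(sdev->desc_size, NR_DESCS_PER_CHANNEL, GFP_KERNEL);
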
41353diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
41354index 146d5df..3c14970 100644
41355--- a/drivers/dma/sh/shdmac.c
41356+++ b/drivers/dma/sh/shdmac.c
41357@@ -514,7 +514,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
41358 return ret;
41359 }
41360
41361-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
41362+static struct notifier_block sh_dmae_nmi_notifier = {
41363 .notifier_call = sh_dmae_nmi_handler,
41364
41365 /* Run before NMI debug handler and KGDB */
41366diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
41367index 592af5f..bb1d583 100644
41368--- a/drivers/edac/edac_device.c
41369+++ b/drivers/edac/edac_device.c
41370@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
41371 */
41372 int edac_device_alloc_index(void)
41373 {
41374- static atomic_t device_indexes = ATOMIC_INIT(0);
41375+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
41376
41377- return atomic_inc_return(&device_indexes) - 1;
41378+ return atomic_inc_return_unchecked(&device_indexes) - 1;
41379 }
41380 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
41381
41382diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
41383index 01fae82..1dd8289 100644
41384--- a/drivers/edac/edac_mc_sysfs.c
41385+++ b/drivers/edac/edac_mc_sysfs.c
41386@@ -152,7 +152,7 @@ static const char * const edac_caps[] = {
41387 struct dev_ch_attribute {
41388 struct device_attribute attr;
41389 int channel;
41390-};
41391+} __do_const;
41392
41393 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
41394 struct dev_ch_attribute dev_attr_legacy_##_name = \
41395@@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
41396 }
41397
41398 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
41399+ pax_open_kernel();
41400 if (mci->get_sdram_scrub_rate) {
41401- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41402- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41403+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41404+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41405 }
41406 if (mci->set_sdram_scrub_rate) {
41407- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41408- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41409+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41410+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41411 }
41412+ pax_close_kernel();
41413 err = device_create_file(&mci->dev,
41414 &dev_attr_sdram_scrub_rate);
41415 if (err) {
41416diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
41417index 2cf44b4d..6dd2dc7 100644
41418--- a/drivers/edac/edac_pci.c
41419+++ b/drivers/edac/edac_pci.c
41420@@ -29,7 +29,7 @@
41421
41422 static DEFINE_MUTEX(edac_pci_ctls_mutex);
41423 static LIST_HEAD(edac_pci_list);
41424-static atomic_t pci_indexes = ATOMIC_INIT(0);
41425+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
41426
41427 /*
41428 * edac_pci_alloc_ctl_info
41429@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
41430 */
41431 int edac_pci_alloc_index(void)
41432 {
41433- return atomic_inc_return(&pci_indexes) - 1;
41434+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
41435 }
41436 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
41437
41438diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
41439index e8658e4..22746d6 100644
41440--- a/drivers/edac/edac_pci_sysfs.c
41441+++ b/drivers/edac/edac_pci_sysfs.c
41442@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
41443 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
41444 static int edac_pci_poll_msec = 1000; /* one second workq period */
41445
41446-static atomic_t pci_parity_count = ATOMIC_INIT(0);
41447-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
41448+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
41449+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
41450
41451 static struct kobject *edac_pci_top_main_kobj;
41452 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
41453@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
41454 void *value;
41455 ssize_t(*show) (void *, char *);
41456 ssize_t(*store) (void *, const char *, size_t);
41457-};
41458+} __do_const;
41459
41460 /* Set of show/store abstract level functions for PCI Parity object */
41461 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
41462@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41463 edac_printk(KERN_CRIT, EDAC_PCI,
41464 "Signaled System Error on %s\n",
41465 pci_name(dev));
41466- atomic_inc(&pci_nonparity_count);
41467+ atomic_inc_unchecked(&pci_nonparity_count);
41468 }
41469
41470 if (status & (PCI_STATUS_PARITY)) {
41471@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41472 "Master Data Parity Error on %s\n",
41473 pci_name(dev));
41474
41475- atomic_inc(&pci_parity_count);
41476+ atomic_inc_unchecked(&pci_parity_count);
41477 }
41478
41479 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41480@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41481 "Detected Parity Error on %s\n",
41482 pci_name(dev));
41483
41484- atomic_inc(&pci_parity_count);
41485+ atomic_inc_unchecked(&pci_parity_count);
41486 }
41487 }
41488
41489@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41490 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
41491 "Signaled System Error on %s\n",
41492 pci_name(dev));
41493- atomic_inc(&pci_nonparity_count);
41494+ atomic_inc_unchecked(&pci_nonparity_count);
41495 }
41496
41497 if (status & (PCI_STATUS_PARITY)) {
41498@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41499 "Master Data Parity Error on "
41500 "%s\n", pci_name(dev));
41501
41502- atomic_inc(&pci_parity_count);
41503+ atomic_inc_unchecked(&pci_parity_count);
41504 }
41505
41506 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41507@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41508 "Detected Parity Error on %s\n",
41509 pci_name(dev));
41510
41511- atomic_inc(&pci_parity_count);
41512+ atomic_inc_unchecked(&pci_parity_count);
41513 }
41514 }
41515 }
41516@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
41517 if (!check_pci_errors)
41518 return;
41519
41520- before_count = atomic_read(&pci_parity_count);
41521+ before_count = atomic_read_unchecked(&pci_parity_count);
41522
41523 /* scan all PCI devices looking for a Parity Error on devices and
41524 * bridges.
41525@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
41526 /* Only if operator has selected panic on PCI Error */
41527 if (edac_pci_get_panic_on_pe()) {
41528 /* If the count is different 'after' from 'before' */
41529- if (before_count != atomic_read(&pci_parity_count))
41530+ if (before_count != atomic_read_unchecked(&pci_parity_count))
41531 panic("EDAC: PCI Parity Error");
41532 }
41533 }
41534diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
41535index 51b7e3a..aa8a3e8 100644
41536--- a/drivers/edac/mce_amd.h
41537+++ b/drivers/edac/mce_amd.h
41538@@ -77,7 +77,7 @@ struct amd_decoder_ops {
41539 bool (*mc0_mce)(u16, u8);
41540 bool (*mc1_mce)(u16, u8);
41541 bool (*mc2_mce)(u16, u8);
41542-};
41543+} __no_const;
41544
41545 void amd_report_gart_errors(bool);
41546 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
41547diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
41548index 57ea7f4..af06b76 100644
41549--- a/drivers/firewire/core-card.c
41550+++ b/drivers/firewire/core-card.c
41551@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
41552 const struct fw_card_driver *driver,
41553 struct device *device)
41554 {
41555- static atomic_t index = ATOMIC_INIT(-1);
41556+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
41557
41558- card->index = atomic_inc_return(&index);
41559+ card->index = atomic_inc_return_unchecked(&index);
41560 card->driver = driver;
41561 card->device = device;
41562 card->current_tlabel = 0;
41563@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
41564
41565 void fw_core_remove_card(struct fw_card *card)
41566 {
41567- struct fw_card_driver dummy_driver = dummy_driver_template;
41568+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
41569
41570 card->driver->update_phy_reg(card, 4,
41571 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
41572diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
41573index 2c6d5e1..a2cca6b 100644
41574--- a/drivers/firewire/core-device.c
41575+++ b/drivers/firewire/core-device.c
41576@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
41577 struct config_rom_attribute {
41578 struct device_attribute attr;
41579 u32 key;
41580-};
41581+} __do_const;
41582
41583 static ssize_t show_immediate(struct device *dev,
41584 struct device_attribute *dattr, char *buf)
41585diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
41586index eb6935c..3cc2bfa 100644
41587--- a/drivers/firewire/core-transaction.c
41588+++ b/drivers/firewire/core-transaction.c
41589@@ -38,6 +38,7 @@
41590 #include <linux/timer.h>
41591 #include <linux/types.h>
41592 #include <linux/workqueue.h>
41593+#include <linux/sched.h>
41594
41595 #include <asm/byteorder.h>
41596
41597diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
41598index e1480ff6..1a429bd 100644
41599--- a/drivers/firewire/core.h
41600+++ b/drivers/firewire/core.h
41601@@ -111,6 +111,7 @@ struct fw_card_driver {
41602
41603 int (*stop_iso)(struct fw_iso_context *ctx);
41604 };
41605+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
41606
41607 void fw_card_initialize(struct fw_card *card,
41608 const struct fw_card_driver *driver, struct device *device);
41609diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
41610index a66a321..f6caf20 100644
41611--- a/drivers/firewire/ohci.c
41612+++ b/drivers/firewire/ohci.c
41613@@ -2056,10 +2056,12 @@ static void bus_reset_work(struct work_struct *work)
41614 be32_to_cpu(ohci->next_header));
41615 }
41616
41617+#ifndef CONFIG_GRKERNSEC
41618 if (param_remote_dma) {
41619 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
41620 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
41621 }
41622+#endif
41623
41624 spin_unlock_irq(&ohci->lock);
41625
41626@@ -2591,8 +2593,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
41627 unsigned long flags;
41628 int n, ret = 0;
41629
41630+#ifndef CONFIG_GRKERNSEC
41631 if (param_remote_dma)
41632 return 0;
41633+#endif
41634
41635 /*
41636 * FIXME: Make sure this bitmask is cleared when we clear the busReset
41637diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
41638index 94a58a0..f5eba42 100644
41639--- a/drivers/firmware/dmi-id.c
41640+++ b/drivers/firmware/dmi-id.c
41641@@ -16,7 +16,7 @@
41642 struct dmi_device_attribute{
41643 struct device_attribute dev_attr;
41644 int field;
41645-};
41646+} __do_const;
41647 #define to_dmi_dev_attr(_dev_attr) \
41648 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
41649
41650diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
41651index 17afc51..0ef90cd 100644
41652--- a/drivers/firmware/dmi_scan.c
41653+++ b/drivers/firmware/dmi_scan.c
41654@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
41655 if (buf == NULL)
41656 return -1;
41657
41658- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
41659+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
41660
41661 dmi_unmap(buf);
41662 return 0;
41663diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
41664index 1491dd4..aa910db 100644
41665--- a/drivers/firmware/efi/cper.c
41666+++ b/drivers/firmware/efi/cper.c
41667@@ -41,12 +41,12 @@
41668 */
41669 u64 cper_next_record_id(void)
41670 {
41671- static atomic64_t seq;
41672+ static atomic64_unchecked_t seq;
41673
41674- if (!atomic64_read(&seq))
41675- atomic64_set(&seq, ((u64)get_seconds()) << 32);
41676+ if (!atomic64_read_unchecked(&seq))
41677+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
41678
41679- return atomic64_inc_return(&seq);
41680+ return atomic64_inc_return_unchecked(&seq);
41681 }
41682 EXPORT_SYMBOL_GPL(cper_next_record_id);
41683
41684diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
41685index dc79346..b39bd69 100644
41686--- a/drivers/firmware/efi/efi.c
41687+++ b/drivers/firmware/efi/efi.c
41688@@ -122,14 +122,16 @@ static struct attribute_group efi_subsys_attr_group = {
41689 };
41690
41691 static struct efivars generic_efivars;
41692-static struct efivar_operations generic_ops;
41693+static efivar_operations_no_const generic_ops __read_only;
41694
41695 static int generic_ops_register(void)
41696 {
41697- generic_ops.get_variable = efi.get_variable;
41698- generic_ops.set_variable = efi.set_variable;
41699- generic_ops.get_next_variable = efi.get_next_variable;
41700- generic_ops.query_variable_store = efi_query_variable_store;
41701+ pax_open_kernel();
41702+ *(void **)&generic_ops.get_variable = efi.get_variable;
41703+ *(void **)&generic_ops.set_variable = efi.set_variable;
41704+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
41705+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
41706+ pax_close_kernel();
41707
41708 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
41709 }
41710diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
41711index 463c565..02a5640 100644
41712--- a/drivers/firmware/efi/efivars.c
41713+++ b/drivers/firmware/efi/efivars.c
41714@@ -588,7 +588,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
41715 static int
41716 create_efivars_bin_attributes(void)
41717 {
41718- struct bin_attribute *attr;
41719+ bin_attribute_no_const *attr;
41720 int error;
41721
41722 /* new_var */
41723diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
41724index 2f569aa..c95f4fb 100644
41725--- a/drivers/firmware/google/memconsole.c
41726+++ b/drivers/firmware/google/memconsole.c
41727@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
41728 if (!found_memconsole())
41729 return -ENODEV;
41730
41731- memconsole_bin_attr.size = memconsole_length;
41732+ pax_open_kernel();
41733+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
41734+ pax_close_kernel();
41735+
41736 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
41737 }
41738
41739diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
41740index cde3605..8b69df7 100644
41741--- a/drivers/gpio/gpio-em.c
41742+++ b/drivers/gpio/gpio-em.c
41743@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
41744 struct em_gio_priv *p;
41745 struct resource *io[2], *irq[2];
41746 struct gpio_chip *gpio_chip;
41747- struct irq_chip *irq_chip;
41748+ irq_chip_no_const *irq_chip;
41749 const char *name = dev_name(&pdev->dev);
41750 int ret;
41751
41752diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
41753index 7030422..42a3fe9 100644
41754--- a/drivers/gpio/gpio-ich.c
41755+++ b/drivers/gpio/gpio-ich.c
41756@@ -94,7 +94,7 @@ struct ichx_desc {
41757 * this option allows driver caching written output values
41758 */
41759 bool use_outlvl_cache;
41760-};
41761+} __do_const;
41762
41763 static struct {
41764 spinlock_t lock;
41765diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
41766index b6ae89e..ac7349c 100644
41767--- a/drivers/gpio/gpio-rcar.c
41768+++ b/drivers/gpio/gpio-rcar.c
41769@@ -357,7 +357,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
41770 struct gpio_rcar_priv *p;
41771 struct resource *io, *irq;
41772 struct gpio_chip *gpio_chip;
41773- struct irq_chip *irq_chip;
41774+ irq_chip_no_const *irq_chip;
41775 struct device *dev = &pdev->dev;
41776 const char *name = dev_name(dev);
41777 int ret;
41778diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
41779index 66cbcc1..0c5e622 100644
41780--- a/drivers/gpio/gpio-vr41xx.c
41781+++ b/drivers/gpio/gpio-vr41xx.c
41782@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
41783 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
41784 maskl, pendl, maskh, pendh);
41785
41786- atomic_inc(&irq_err_count);
41787+ atomic_inc_unchecked(&irq_err_count);
41788
41789 return -EINVAL;
41790 }
41791diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
41792index 2ebc907..01bdd6e 100644
41793--- a/drivers/gpio/gpiolib.c
41794+++ b/drivers/gpio/gpiolib.c
41795@@ -1482,8 +1482,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
41796 }
41797
41798 if (gpiochip->irqchip) {
41799- gpiochip->irqchip->irq_request_resources = NULL;
41800- gpiochip->irqchip->irq_release_resources = NULL;
41801+ pax_open_kernel();
41802+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
41803+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
41804+ pax_close_kernel();
41805 gpiochip->irqchip = NULL;
41806 }
41807 }
41808@@ -1549,8 +1551,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
41809 gpiochip->irqchip = NULL;
41810 return -EINVAL;
41811 }
41812- irqchip->irq_request_resources = gpiochip_irq_reqres;
41813- irqchip->irq_release_resources = gpiochip_irq_relres;
41814+
41815+ pax_open_kernel();
41816+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
41817+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
41818+ pax_close_kernel();
41819
41820 /*
41821 * Prepare the mapping since the irqchip shall be orthogonal to
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fe94cc1..5e697b3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -3584,7 +3584,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 goto done;
 }

- if (copy_to_user(&enum_ptr[copied].name,
+ if (copy_to_user(enum_ptr[copied].name,
 &prop_enum->name, DRM_PROP_NAME_LEN)) {
 ret = -EFAULT;
 goto done;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8218078..9960928a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
 /**
 * Copy and IOCTL return string to user space
 */
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
 {
 int len;

@@ -342,7 +342,7 @@ long drm_ioctl(struct file *filp,
 struct drm_file *file_priv = filp->private_data;
 struct drm_device *dev;
 const struct drm_ioctl_desc *ioctl = NULL;
- drm_ioctl_t *func;
+ drm_ioctl_no_const_t func;
 unsigned int nr = DRM_IOCTL_NR(cmd);
 int retcode = -EINVAL;
 char stack_kdata[128];
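The __user added to drm_copy_field()'s buffer, like the casts in the drm_ioc32 and qxl hunks below, is invisible to the compiler but lets sparse type-check the user/kernel pointer boundary. For reference, this is roughly how the kernel defines the annotation (modern trees spell the address space differently, so treat the exact form as an approximation):

    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    /* sparse now warns if a plain kernel pointer is passed as buf, or if
     * buf is dereferenced directly instead of via copy_to_user(). */
    int drm_copy_field_like(char __user *buf, unsigned long *buf_len,
                            const char *value);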
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 021fe5d..abc9ce6 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -88,7 +88,7 @@ int drm_open(struct inode *inode, struct file *filp)
 return PTR_ERR(minor);

 dev = minor->dev;
- if (!dev->open_count++)
+ if (local_inc_return(&dev->open_count) == 1)
 need_setup = 1;

 /* share address_space across all char-devs of a single device */
@@ -105,7 +105,7 @@ int drm_open(struct inode *inode, struct file *filp)
 return 0;

 err_undo:
- dev->open_count--;
+ local_dec(&dev->open_count);
 drm_minor_release(minor);
 return retcode;
 }
@@ -427,7 +427,7 @@ int drm_release(struct inode *inode, struct file *filp)

 mutex_lock(&drm_global_mutex);

- DRM_DEBUG("open_count = %d\n", dev->open_count);
+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));

 if (dev->driver->preclose)
 dev->driver->preclose(dev, file_priv);
@@ -436,10 +436,10 @@ int drm_release(struct inode *inode, struct file *filp)
 * Begin inline drm_release
 */

- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
 task_pid_nr(current),
 (long)old_encode_dev(file_priv->minor->kdev->devt),
- dev->open_count);
+ local_read(&dev->open_count));

 /* Release any auth tokens that might point to this file_priv,
 (do that under the drm_global_mutex) */
@@ -540,7 +540,7 @@ int drm_release(struct inode *inode, struct file *filp)
 * End inline drm_release
 */

- if (!--dev->open_count) {
+ if (local_dec_and_test(&dev->open_count)) {
 retcode = drm_lastclose(dev);
 if (drm_device_is_unplugged(dev))
 drm_put_dev(dev);
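open_count becomes a local_t here (and in drm_stub.c, i915, nouveau and radeon below), presumably because the switcheroo can_switch callbacks read it without holding drm_global_mutex; local_inc_return()/local_dec_and_test() keep the first-open/last-close logic while making the updates atomic. The pattern itself, sketched with C11 atomics as a stand-in:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long open_count;

    static void dev_open(void)
    {
            if (atomic_fetch_add(&open_count, 1) + 1 == 1)  /* local_inc_return() == 1 */
                    puts("first open: run one-time setup");
    }

    static void dev_release(void)
    {
            if (atomic_fetch_sub(&open_count, 1) - 1 == 0)  /* local_dec_and_test() */
                    puts("last close: tear everything down");
    }

    int main(void) { dev_open(); dev_release(); return 0; }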
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index 3d2e91c..d31c4c9 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -36,7 +36,7 @@
 struct drm_global_item {
 struct mutex mutex;
 void *object;
- int refcount;
+ atomic_t refcount;
 };

 static struct drm_global_item glob[DRM_GLOBAL_NUM];
@@ -49,7 +49,7 @@ void drm_global_init(void)
 struct drm_global_item *item = &glob[i];
 mutex_init(&item->mutex);
 item->object = NULL;
- item->refcount = 0;
+ atomic_set(&item->refcount, 0);
 }
 }

@@ -59,7 +59,7 @@ void drm_global_release(void)
 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
 struct drm_global_item *item = &glob[i];
 BUG_ON(item->object != NULL);
- BUG_ON(item->refcount != 0);
+ BUG_ON(atomic_read(&item->refcount) != 0);
 }
 }

@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
 struct drm_global_item *item = &glob[ref->global_type];

 mutex_lock(&item->mutex);
- if (item->refcount == 0) {
+ if (atomic_read(&item->refcount) == 0) {
 item->object = kzalloc(ref->size, GFP_KERNEL);
 if (unlikely(item->object == NULL)) {
 ret = -ENOMEM;
@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
 goto out_err;

 }
- ++item->refcount;
+ atomic_inc(&item->refcount);
 ref->object = item->object;
 mutex_unlock(&item->mutex);
 return 0;
@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
 struct drm_global_item *item = &glob[ref->global_type];

 mutex_lock(&item->mutex);
- BUG_ON(item->refcount == 0);
+ BUG_ON(atomic_read(&item->refcount) == 0);
 BUG_ON(ref->object != item->object);
- if (--item->refcount == 0) {
+ if (atomic_dec_and_test(&item->refcount)) {
 ref->release(ref);
 item->object = NULL;
 }
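The drm_global refcount is still serialized by item->mutex, so the atomics are not needed for mutual exclusion; moving from int to atomic_t is what lets the REFCOUNT plugin instrument the increment, and atomic_dec_and_test() keeps the release-exactly-once shape. That shape in isolation, as a hedged C11 sketch:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct item {
            atomic_int refcount;
            void *object;
    };

    /* Only the caller that moves the count 1 -> 0 performs the release. */
    static void item_unref(struct item *it)
    {
            if (atomic_fetch_sub(&it->refcount, 1) == 1) {
                    free(it->object);
                    it->object = NULL;
            }
    }

    int main(void)
    {
            struct item it = { 1, malloc(16) };
            item_unref(&it);        /* count hits zero, object freed once */
            return 0;
    }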
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 86feedd..cba70f5 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -73,10 +73,13 @@ int drm_vm_info(struct seq_file *m, void *data)
 struct drm_local_map *map;
 struct drm_map_list *r_list;

- /* Hardcoded from _DRM_FRAME_BUFFER,
- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+ static const char * const types[] = {
+ [_DRM_FRAME_BUFFER] = "FB",
+ [_DRM_REGISTERS] = "REG",
+ [_DRM_SHM] = "SHM",
+ [_DRM_AGP] = "AGP",
+ [_DRM_SCATTER_GATHER] = "SG",
+ [_DRM_CONSISTENT] = "PCI"};
 const char *type;
 int i;

@@ -87,7 +90,7 @@ int drm_vm_info(struct seq_file *m, void *data)
 map = r_list->map;
 if (!map)
 continue;
- if (map->type < 0 || map->type > 5)
+ if (map->type >= ARRAY_SIZE(types))
 type = "??";
 else
 type = types[map->type];
@@ -259,7 +262,11 @@ int drm_vma_info(struct seq_file *m, void *data)
 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
 vma->vm_flags & VM_LOCKED ? 'l' : '-',
 vma->vm_flags & VM_IO ? 'i' : '-',
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+ 0);
+#else
 vma->vm_pgoff);
+#endif

 #if defined(__i386__)
 pgprot = pgprot_val(vma->vm_page_prot);
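Two fixes land together in drm_info.c: the string table gains designated initializers keyed on the real _DRM_* map types, and the hand-written type range check becomes ARRAY_SIZE-based so the bound can never drift from the table. A condensed sketch, with local enum values standing in for the DRM map-type constants:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum map_type { FB, REG, SHM, AGP, SG, PCI };

    /* Designated entries stay correct even if the enum is reordered, and
     * ARRAY_SIZE replaces the magic bound "5" from the old range check. */
    static const char * const types[] = {
            [FB]  = "FB",  [REG] = "REG", [SHM] = "SHM",
            [AGP] = "AGP", [SG]  = "SG",  [PCI] = "PCI",
    };

    static const char *type_name(unsigned int t)
    {
            return t < ARRAY_SIZE(types) ? types[t] : "??";
    }

    int main(void)
    {
            puts(type_name(AGP));   /* "AGP" */
            puts(type_name(42));    /* "??" instead of an out-of-bounds read */
            return 0;
    }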
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 2f4c4343..dd12cd2 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
 request = compat_alloc_user_space(nbytes);
 if (!access_ok(VERIFY_WRITE, request, nbytes))
 return -EFAULT;
- list = (struct drm_buf_desc *) (request + 1);
+ list = (struct drm_buf_desc __user *) (request + 1);

 if (__put_user(count, &request->count)
 || __put_user(list, &request->list))
@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
 request = compat_alloc_user_space(nbytes);
 if (!access_ok(VERIFY_WRITE, request, nbytes))
 return -EFAULT;
- list = (struct drm_buf_pub *) (request + 1);
+ list = (struct drm_buf_pub __user *) (request + 1);

 if (__put_user(count, &request->count)
 || __put_user(list, &request->list))
@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
 return 0;
 }

-drm_ioctl_compat_t *drm_compat_ioctls[] = {
+drm_ioctl_compat_t drm_compat_ioctls[] = {
 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn;
 int ret;

 /* Assume that ioctls without an explicit compat routine will just
@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
 return drm_ioctl(filp, cmd, arg);

- fn = drm_compat_ioctls[nr];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
+ if (drm_compat_ioctls[nr] != NULL)
+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
 else
 ret = drm_ioctl(filp, cmd, arg);

diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 14d1646..99f9d49 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -455,7 +455,7 @@ void drm_unplug_dev(struct drm_device *dev)

 drm_device_set_unplugged(dev);

- if (dev->open_count == 0) {
+ if (local_read(&dev->open_count) == 0) {
 drm_put_dev(dev);
 }
 mutex_unlock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 369b262..09ea3ab 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
 */
 int drm_sysfs_device_add(struct drm_minor *minor)
 {
- char *minor_str;
+ const char *minor_str;
 int r;

 if (minor->type == DRM_MINOR_CONTROL)
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index d4d16ed..8fb0b51 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
 int page_flipping;

 wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
+ atomic_unchecked_t irq_received;
+ atomic_unchecked_t irq_emitted;

 int front_offset;
 } drm_i810_private_t;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d443441..ab091dd 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,7 +1290,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 * locking inversion with the driver load path. And the access here is
 * completely racy anyway. So don't bother with locking for now.
 */
- return dev->open_count == 0;
+ return local_read(&dev->open_count) == 0;
 }

 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a30133..ef4a743 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -891,9 +891,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)

 static int
 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
- int count)
+ unsigned int count)
 {
- int i;
+ unsigned int i;
 unsigned relocs_total = 0;
 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 2e0613e..a8b94d9 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
 (unsigned long)request);
 }

-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t i915_compat_ioctls[] = {
 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
 [DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
 int ret;

 if (nr < DRM_COMMAND_BASE)
 return drm_compat_ioctl(filp, cmd, arg);

- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
 ret = (*fn) (filp, cmd, arg);
- else
+ } else
 ret = drm_ioctl(filp, cmd, arg);

 return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0be855..94e82d9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11604,13 +11604,13 @@ struct intel_quirk {
 int subsystem_vendor;
 int subsystem_device;
 void (*hook)(struct drm_device *dev);
-};
+} __do_const;

 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
 struct intel_dmi_quirk {
 void (*hook)(struct drm_device *dev);
 const struct dmi_system_id (*dmi_id_list)[];
-};
+} __do_const;

 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
 {
@@ -11618,18 +11618,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
 return 1;
 }

-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+static const struct dmi_system_id intel_dmi_quirks_table[] = {
 {
- .dmi_id_list = &(const struct dmi_system_id[]) {
- {
- .callback = intel_dmi_reverse_brightness,
- .ident = "NCR Corporation",
- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, ""),
- },
- },
- { } /* terminating entry */
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
 },
+ },
+ { } /* terminating entry */
+};
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &intel_dmi_quirks_table,
 .hook = quirk_invert_brightness,
 },
 };
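__do_const, applied to ichx_desc earlier and to the two quirk structs here, is an annotation consumed by the PaX constify gcc plugin: instances of marked structures (typically tables carrying function pointers) are forced into read-only memory so a kernel write primitive cannot retarget the hooks. Without the plugin, the closest portable approximation is simply declaring the instances const:

    /* Approximation of the effect: const instances land in .rodata. */
    struct quirk_like {
            int device;
            void (*hook)(void *dev);
    };

    static void reverse_brightness(void *dev) { (void)dev; }

    static const struct quirk_like quirks[] = {
            { 0x2a42, reverse_brightness },
    };

    int main(void)
    {
            quirks[0].hook(0);      /* callable, but the table is immutable */
            return 0;
    }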
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index fe45321..836fdca 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
 u32 clear_cmd;
 u32 maccess;

- atomic_t vbl_received; /**< Number of vblanks received. */
+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
 wait_queue_head_t fence_queue;
- atomic_t last_fence_retired;
+ atomic_unchecked_t last_fence_retired;
 u32 next_fence_to_post;

 unsigned int fb_cpp;
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 729bfd5..ead8823 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
 return 0;
 }

-drm_ioctl_compat_t *mga_compat_ioctls[] = {
+drm_ioctl_compat_t mga_compat_ioctls[] = {
 [DRM_MGA_INIT] = compat_mga_init,
 [DRM_MGA_GETPARAM] = compat_mga_getparam,
 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
 int ret;

 if (nr < DRM_COMMAND_BASE)
 return drm_compat_ioctl(filp, cmd, arg);

- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
 ret = (*fn) (filp, cmd, arg);
- else
+ } else
 ret = drm_ioctl(filp, cmd, arg);

 return ret;
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 1b071b8..de8601a 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
 if (crtc != 0)
 return 0;

- return atomic_read(&dev_priv->vbl_received);
+ return atomic_read_unchecked(&dev_priv->vbl_received);
 }


@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
 /* VBLANK interrupt */
 if (status & MGA_VLINEPEN) {
 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
- atomic_inc(&dev_priv->vbl_received);
+ atomic_inc_unchecked(&dev_priv->vbl_received);
 drm_handle_vblank(dev, 0);
 handled = 1;
 }
@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
 if ((prim_start & ~0x03) != (prim_end & ~0x03))
 MGA_WRITE(MGA_PRIMEND, prim_end);

- atomic_inc(&dev_priv->last_fence_retired);
+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
 wake_up(&dev_priv->fence_queue);
 handled = 1;
 }
@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
 * using fences.
 */
 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
 - *sequence) <= (1 << 23)));

 *sequence = cur_fence;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8268a4c..5105708 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 struct bit_table {
 const char id;
 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
-};
+} __no_const;

 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })

diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 7efbafa..19f8087 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -97,7 +97,6 @@ struct nouveau_drm {
 struct drm_global_reference mem_global_ref;
 struct ttm_bo_global_ref bo_global_ref;
 struct ttm_bo_device bdev;
- atomic_t validate_sequence;
 int (*move)(struct nouveau_channel *,
 struct ttm_buffer_object *,
 struct ttm_mem_reg *, struct ttm_mem_reg *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index 462679a..88e32a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
 unsigned long arg)
 {
 unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
+ drm_ioctl_compat_t fn = NULL;
 int ret;

 if (nr < DRM_COMMAND_BASE)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index ab0228f..20b756b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 }

 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
- nouveau_vram_manager_init,
- nouveau_vram_manager_fini,
- nouveau_vram_manager_new,
- nouveau_vram_manager_del,
- nouveau_vram_manager_debug
+ .init = nouveau_vram_manager_init,
+ .takedown = nouveau_vram_manager_fini,
+ .get_node = nouveau_vram_manager_new,
+ .put_node = nouveau_vram_manager_del,
+ .debug = nouveau_vram_manager_debug
 };

 static int
@@ -199,11 +199,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 }

 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
- nouveau_gart_manager_init,
- nouveau_gart_manager_fini,
- nouveau_gart_manager_new,
- nouveau_gart_manager_del,
- nouveau_gart_manager_debug
+ .init = nouveau_gart_manager_init,
+ .takedown = nouveau_gart_manager_fini,
+ .get_node = nouveau_gart_manager_new,
+ .put_node = nouveau_gart_manager_del,
+ .debug = nouveau_gart_manager_debug
 };

 #include <core/subdev/vm/nv04.h>
@@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 }

 const struct ttm_mem_type_manager_func nv04_gart_manager = {
- nv04_gart_manager_init,
- nv04_gart_manager_fini,
- nv04_gart_manager_new,
- nv04_gart_manager_del,
- nv04_gart_manager_debug
+ .init = nv04_gart_manager_init,
+ .takedown = nv04_gart_manager_fini,
+ .get_node = nv04_gart_manager_new,
+ .put_node = nv04_gart_manager_del,
+ .debug = nv04_gart_manager_debug
 };

 int
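All three nouveau manager tables (and the ttm_bo_manager.c and vmwgfx_gmrid_manager.c hunks below) move from positional to designated initializers. Positional init binds silently by field order and misassigns the moment a member is added or reordered; the .init = ... form is checked by name. In miniature:

    struct mem_ops {
            int  (*init)(void);
            void (*takedown)(void);
            void (*debug)(const char *prefix);
    };

    static int  my_init(void)           { return 0; }
    static void my_takedown(void)       { }
    static void my_debug(const char *p) { (void)p; }

    /* Survives reordering of struct mem_ops; a positional { my_init,
     * my_takedown, my_debug } would quietly misbind instead. */
    static const struct mem_ops ops = {
            .init     = my_init,
            .takedown = my_takedown,
            .debug    = my_debug,
    };

    int main(void) { return ops.init(); }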
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 4f4c3fe..2cce716 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -70,7 +70,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
 * locking inversion with the driver load path. And the access here is
 * completely racy anyway. So don't bother with locking for now.
 */
- return dev->open_count == 0;
+ return local_read(&dev->open_count) == 0;
 }

 static const struct vga_switcheroo_client_ops
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index eb89653..613cf71 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
 int ret;

 mutex_lock(&qdev->async_io_mutex);
- irq_num = atomic_read(&qdev->irq_received_io_cmd);
+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
 if (qdev->last_sent_io_cmd > irq_num) {
 if (intr)
 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 else
 ret = wait_event_timeout(qdev->io_cmd_event,
- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 /* 0 is timeout, just bail the "hw" has gone away */
 if (ret <= 0)
 goto out;
- irq_num = atomic_read(&qdev->irq_received_io_cmd);
+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
 }
 outb(val, addr);
 qdev->last_sent_io_cmd = irq_num + 1;
 if (intr)
 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 else
 ret = wait_event_timeout(qdev->io_cmd_event,
- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
 if (ret > 0)
 ret = 0;
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index c3c2bbd..bc3c0fb 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
 struct drm_info_node *node = (struct drm_info_node *) m->private;
 struct qxl_device *qdev = node->minor->dev->dev_private;

- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
 seq_printf(m, "%d\n", qdev->irq_received_error);
 return 0;
 }
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 36ed40b..0397633 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -290,10 +290,10 @@ struct qxl_device {
 unsigned int last_sent_io_cmd;

 /* interrupt handling */
- atomic_t irq_received;
- atomic_t irq_received_display;
- atomic_t irq_received_cursor;
- atomic_t irq_received_io_cmd;
+ atomic_unchecked_t irq_received;
+ atomic_unchecked_t irq_received_display;
+ atomic_unchecked_t irq_received_cursor;
+ atomic_unchecked_t irq_received_io_cmd;
 unsigned irq_received_error;
 wait_queue_head_t display_event;
 wait_queue_head_t cursor_event;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index b110883..dd06418 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,

 /* TODO copy slow path code from i915 */
 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);

 {
 struct qxl_drawable *draw = fb_cmd;
@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 struct drm_qxl_reloc reloc;

 if (copy_from_user(&reloc,
- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
 sizeof(reloc))) {
 ret = -EFAULT;
 goto out_free_bos;
@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,

 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {

- struct drm_qxl_command *commands =
- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+ struct drm_qxl_command __user *commands =
+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;

- if (copy_from_user(&user_cmd, &commands[cmd_num],
+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
 sizeof(user_cmd)))
 return -EFAULT;

diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 0bf1e20..42a7310 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
 if (!pending)
 return IRQ_NONE;

- atomic_inc(&qdev->irq_received);
+ atomic_inc_unchecked(&qdev->irq_received);

 if (pending & QXL_INTERRUPT_DISPLAY) {
- atomic_inc(&qdev->irq_received_display);
+ atomic_inc_unchecked(&qdev->irq_received_display);
 wake_up_all(&qdev->display_event);
 qxl_queue_garbage_collect(qdev, false);
 }
 if (pending & QXL_INTERRUPT_CURSOR) {
- atomic_inc(&qdev->irq_received_cursor);
+ atomic_inc_unchecked(&qdev->irq_received_cursor);
 wake_up_all(&qdev->cursor_event);
 }
 if (pending & QXL_INTERRUPT_IO_CMD) {
- atomic_inc(&qdev->irq_received_io_cmd);
+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
 wake_up_all(&qdev->io_cmd_event);
 }
 if (pending & QXL_INTERRUPT_ERROR) {
@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
 init_waitqueue_head(&qdev->io_cmd_event);
 INIT_WORK(&qdev->client_monitors_config_work,
 qxl_client_monitors_config_work_func);
- atomic_set(&qdev->irq_received, 0);
- atomic_set(&qdev->irq_received_display, 0);
- atomic_set(&qdev->irq_received_cursor, 0);
- atomic_set(&qdev->irq_received_io_cmd, 0);
+ atomic_set_unchecked(&qdev->irq_received, 0);
+ atomic_set_unchecked(&qdev->irq_received_display, 0);
+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
 qdev->irq_received_error = 0;
 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 71a1bae..cb1f103 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
 }
 }

-static struct vm_operations_struct qxl_ttm_vm_ops;
+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
 static const struct vm_operations_struct *ttm_vm_ops;

 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
 return r;
 if (unlikely(ttm_vm_ops == NULL)) {
 ttm_vm_ops = vma->vm_ops;
+ pax_open_kernel();
 qxl_ttm_vm_ops = *ttm_vm_ops;
 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+ pax_close_kernel();
 }
 vma->vm_ops = &qxl_ttm_vm_ops;
 return 0;
@@ -555,25 +557,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
 {
 #if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
- unsigned i;
+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
+ {
+ .name = "qxl_mem_mm",
+ .show = &qxl_mm_dump_table,
+ },
+ {
+ .name = "qxl_surf_mm",
+ .show = &qxl_mm_dump_table,
+ }
+ };

- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
- if (i == 0)
- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
- else
- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
- qxl_mem_types_list[i].driver_features = 0;
- if (i == 0)
- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
- else
- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+ pax_open_kernel();
+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+ pax_close_kernel();

- }
- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
 #else
 return 0;
 #endif
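The qxl debugfs rewrite replaces a loop that sprintf'ed names into static buffers with a table fully initialized at build time, so the only runtime writes left are the two .data pointers, done under pax_open_kernel(). The shape of that trade-off, sketched with invented field and variable names:

    struct info_entry {
            const char *name;
            int  (*show)(void *m, void *data);
            void *data;                     /* the one runtime-known field */
    };

    static int dump_table(void *m, void *data)
    {
            (void)m; (void)data;
            return 0;
    }

    /* Everything except .data is fixed at build time and could live in
     * read-only memory; init code patches in just the two pointers. */
    static struct info_entry mem_types[] = {
            { "qxl_mem_mm",  dump_table, 0 },
            { "qxl_surf_mm", dump_table, 0 },
    };

    int main(void)
    {
            static int vram_priv, surf_priv;        /* stand-ins */
            mem_types[0].data = &vram_priv;
            mem_types[1].data = &surf_priv;
            return mem_types[0].show(0, mem_types[0].data);
    }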
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 59459fe..be26b31 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)

 /* GH: Simple idle check.
 */
- atomic_set(&dev_priv->idle_count, 0);
+ atomic_set_unchecked(&dev_priv->idle_count, 0);

 /* We don't support anything other than bus-mastering ring mode,
 * but the ring can be in either AGP or PCI space for the ring
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 5bf3f5f..7000661 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
 int is_pci;
 unsigned long cce_buffers_offset;

- atomic_t idle_count;
+ atomic_unchecked_t idle_count;

 int page_flipping;
 int current_page;
 u32 crtc_offset;
 u32 crtc_offset_cntl;

- atomic_t vbl_received;
+ atomic_unchecked_t vbl_received;

 u32 color_fmt;
 unsigned int front_offset;
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 663f38c..c689495 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
 }

-drm_ioctl_compat_t *r128_compat_ioctls[] = {
+drm_ioctl_compat_t r128_compat_ioctls[] = {
 [DRM_R128_INIT] = compat_r128_init,
 [DRM_R128_DEPTH] = compat_r128_depth,
 [DRM_R128_STIPPLE] = compat_r128_stipple,
@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
 int ret;

 if (nr < DRM_COMMAND_BASE)
 return drm_compat_ioctl(filp, cmd, arg);

- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
 ret = (*fn) (filp, cmd, arg);
- else
+ } else
 ret = drm_ioctl(filp, cmd, arg);

 return ret;
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index c2ae496..30b5993 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
 if (crtc != 0)
 return 0;

- return atomic_read(&dev_priv->vbl_received);
+ return atomic_read_unchecked(&dev_priv->vbl_received);
 }

 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
 /* VBLANK interrupt */
 if (status & R128_CRTC_VBLANK_INT) {
 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
- atomic_inc(&dev_priv->vbl_received);
+ atomic_inc_unchecked(&dev_priv->vbl_received);
 drm_handle_vblank(dev, 0);
 return IRQ_HANDLED;
 }
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 575e986..66e62ca 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,

 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
 {
- if (atomic_read(&dev_priv->idle_count) == 0)
+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
 else
- atomic_set(&dev_priv->idle_count, 0);
+ atomic_set_unchecked(&dev_priv->idle_count, 0);
 }

 #endif
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 4a85bb6..aaea819 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
 regex_t mask_rex;
 regmatch_t match[4];
 char buf[1024];
- size_t end;
+ long end;
 int len;
 int done = 0;
 int r;
 unsigned o;
 struct offset *offset;
 char last_reg_s[10];
- int last_reg;
+ unsigned long last_reg;

 if (regcomp
 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 697add2..9860f5b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1169,7 +1169,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
 * locking inversion with the driver load path. And the access here is
 * completely racy anyway. So don't bother with locking for now.
 */
- return dev->open_count == 0;
+ return local_read(&dev->open_count) == 0;
 }

 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index dafd812..1bf20c7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {

 /* SW interrupt */
 wait_queue_head_t swi_queue;
- atomic_t swi_emitted;
+ atomic_unchecked_t swi_emitted;
 int vblank_crtc;
 uint32_t irq_enable_reg;
 uint32_t r500_disp_irq_reg;
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index 0b98ea1..0881827 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
 request = compat_alloc_user_space(sizeof(*request));
 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
 || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
+ || __put_user((unsigned long)req32.value,
 &request->value))
 return -EFAULT;

@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
 #define compat_radeon_cp_setparam NULL
 #endif /* X86_64 || IA64 */

-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
 int ret;

 if (nr < DRM_COMMAND_BASE)
 return drm_compat_ioctl(filp, cmd, arg);

- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
 ret = (*fn) (filp, cmd, arg);
- else
+ } else
 ret = drm_ioctl(filp, cmd, arg);

 return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 244b19b..c19226d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
 unsigned int ret;
 RING_LOCALS;

- atomic_inc(&dev_priv->swi_emitted);
- ret = atomic_read(&dev_priv->swi_emitted);
+ atomic_inc_unchecked(&dev_priv->swi_emitted);
+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);

 BEGIN_RING(4);
 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
 drm_radeon_private_t *dev_priv =
 (drm_radeon_private_t *) dev->dev_private;

- atomic_set(&dev_priv->swi_emitted, 0);
+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
 init_waitqueue_head(&dev_priv->swi_queue);

 dev->max_vblank_count = 0x001fffff;
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 23bb64f..69d7234 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

- if (copy_from_user(&depth_boxes, clear->depth_boxes,
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
 sarea_priv->nbox * sizeof(depth_boxes[0])))
 return -EFAULT;

@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
 {
 drm_radeon_private_t *dev_priv = dev->dev_private;
 drm_radeon_getparam_t *param = data;
- int value;
+ int value = 0;

 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c8a8a51..219dacc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -797,7 +797,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 man->size = size >> PAGE_SHIFT;
 }

-static struct vm_operations_struct radeon_ttm_vm_ops;
+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;

 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -838,8 +838,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 if (unlikely(ttm_vm_ops == NULL)) {
 ttm_vm_ops = vma->vm_ops;
+ pax_open_kernel();
 radeon_ttm_vm_ops = *ttm_vm_ops;
 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+ pax_close_kernel();
 }
 vma->vm_ops = &radeon_ttm_vm_ops;
 return 0;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index ef40381..347463e 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1173,7 +1173,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
 }

 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
- dc->debugfs_files[i].data = dc;
+ *(void **)&dc->debugfs_files[i].data = dc;

 err = drm_debugfs_create_files(dc->debugfs_files,
 ARRAY_SIZE(debugfs_files),
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index bd56f2a..255af4b 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -41,7 +41,7 @@ struct tegra_dsi {
 struct clk *clk_lp;
 struct clk *clk;

- struct drm_info_list *debugfs_files;
+ drm_info_list_no_const *debugfs_files;
 struct drm_minor *minor;
 struct dentry *debugfs;

diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index ba067bb..23afbbd 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -60,7 +60,7 @@ struct tegra_hdmi {
 bool stereo;
 bool dvi;

- struct drm_info_list *debugfs_files;
+ drm_info_list_no_const *debugfs_files;
 struct drm_minor *minor;
 struct dentry *debugfs;
 };
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index bd850c9..d9f3573 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -146,10 +146,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 }

 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
- ttm_bo_man_init,
- ttm_bo_man_takedown,
- ttm_bo_man_get_node,
- ttm_bo_man_put_node,
- ttm_bo_man_debug
+ .init = ttm_bo_man_init,
+ .takedown = ttm_bo_man_takedown,
+ .get_node = ttm_bo_man_get_node,
+ .put_node = ttm_bo_man_put_node,
+ .debug = ttm_bo_man_debug
 };
 EXPORT_SYMBOL(ttm_bo_manager_func);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index dbc2def..0a9f710 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
 zone->glob = glob;
 glob->zone_kernel = zone;
 ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
 if (unlikely(ret != 0)) {
 kobject_put(&zone->kobj);
 return ret;
@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
 zone->glob = glob;
 glob->zone_dma32 = zone;
 ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
 if (unlikely(ret != 0)) {
 kobject_put(&zone->kobj);
 return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9..cba15cf 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -391,9 +391,9 @@ out:
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
 unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
 struct ttm_page_pool *pool;
 int shrink_pages = sc->nr_to_scan;
 unsigned long freed = 0;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 3771763..883f206 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
 fb_deferred_io_cleanup(info);
 kfree(info->fbdefio);
 info->fbdefio = NULL;
- info->fbops->fb_mmap = udl_fb_mmap;
 }

 pr_warn("released /dev/fb%d user=%d count=%d\n",
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index ad02732..144f5ed 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
 typedef uint32_t maskarray_t[5];

 typedef struct drm_via_irq {
- atomic_t irq_received;
+ atomic_unchecked_t irq_received;
 uint32_t pending_mask;
 uint32_t enable_mask;
 wait_queue_head_t irq_queue;
@@ -75,7 +75,7 @@ typedef struct drm_via_private {
 struct timeval last_vblank;
 int last_vblank_valid;
 unsigned usec_per_vblank;
- atomic_t vbl_received;
+ atomic_unchecked_t vbl_received;
 drm_via_state_t hc_state;
 char pci_buf[VIA_PCI_BUF_SIZE];
 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 1319433..a993b0c 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
 if (crtc != 0)
 return 0;

- return atomic_read(&dev_priv->vbl_received);
+ return atomic_read_unchecked(&dev_priv->vbl_received);
 }

 irqreturn_t via_driver_irq_handler(int irq, void *arg)
@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)

 status = VIA_READ(VIA_REG_INTERRUPT);
 if (status & VIA_IRQ_VBLANK_PENDING) {
- atomic_inc(&dev_priv->vbl_received);
- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
+ atomic_inc_unchecked(&dev_priv->vbl_received);
+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
 do_gettimeofday(&cur_vblank);
 if (dev_priv->last_vblank_valid) {
 dev_priv->usec_per_vblank =
@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
 dev_priv->last_vblank = cur_vblank;
 dev_priv->last_vblank_valid = 1;
 }
- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
 DRM_DEBUG("US per vblank is: %u\n",
 dev_priv->usec_per_vblank);
 }
@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)

 for (i = 0; i < dev_priv->num_irqs; ++i) {
 if (status & cur_irq->pending_mask) {
- atomic_inc(&cur_irq->irq_received);
+ atomic_inc_unchecked(&cur_irq->irq_received);
 wake_up(&cur_irq->irq_queue);
 handled = 1;
 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
 masks[irq][4]));
- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
 } else {
 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
 (((cur_irq_sequence =
- atomic_read(&cur_irq->irq_received)) -
+ atomic_read_unchecked(&cur_irq->irq_received)) -
 *sequence) <= (1 << 23)));
 }
 *sequence = cur_irq_sequence;
@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
 }

 for (i = 0; i < dev_priv->num_irqs; ++i) {
- atomic_set(&cur_irq->irq_received, 0);
+ atomic_set_unchecked(&cur_irq->irq_received, 0);
 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
 init_waitqueue_head(&cur_irq->irq_queue);
@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
 case VIA_IRQ_RELATIVE:
 irqwait->request.sequence +=
- atomic_read(&cur_irq->irq_received);
+ atomic_read_unchecked(&cur_irq->irq_received);
 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
 case VIA_IRQ_ABSOLUTE:
 break;
43113diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43114index 6b252a8..5975dfe 100644
43115--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43116+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43117@@ -437,7 +437,7 @@ struct vmw_private {
43118 * Fencing and IRQs.
43119 */
43120
43121- atomic_t marker_seq;
43122+ atomic_unchecked_t marker_seq;
43123 wait_queue_head_t fence_queue;
43124 wait_queue_head_t fifo_queue;
43125 int fence_queue_waiters; /* Protected by hw_mutex */
43126diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43127index 6ccd993..618d592 100644
43128--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43129+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43130@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
43131 (unsigned int) min,
43132 (unsigned int) fifo->capabilities);
43133
43134- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43135+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43136 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
43137 vmw_marker_queue_init(&fifo->marker_queue);
43138 return vmw_fifo_send_fence(dev_priv, &dummy);
43139@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
43140 if (reserveable)
43141 iowrite32(bytes, fifo_mem +
43142 SVGA_FIFO_RESERVED);
43143- return fifo_mem + (next_cmd >> 2);
43144+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
43145 } else {
43146 need_bounce = true;
43147 }
43148@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43149
43150 fm = vmw_fifo_reserve(dev_priv, bytes);
43151 if (unlikely(fm == NULL)) {
43152- *seqno = atomic_read(&dev_priv->marker_seq);
43153+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43154 ret = -ENOMEM;
43155 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
43156 false, 3*HZ);
43157@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43158 }
43159
43160 do {
43161- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
43162+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
43163 } while (*seqno == 0);
43164
43165 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
43166diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43167index b1273e8..9c274fd 100644
43168--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43169+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43170@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
43171 }
43172
43173 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
43174- vmw_gmrid_man_init,
43175- vmw_gmrid_man_takedown,
43176- vmw_gmrid_man_get_node,
43177- vmw_gmrid_man_put_node,
43178- vmw_gmrid_man_debug
43179+ .init = vmw_gmrid_man_init,
43180+ .takedown = vmw_gmrid_man_takedown,
43181+ .get_node = vmw_gmrid_man_get_node,
43182+ .put_node = vmw_gmrid_man_put_node,
43183+ .debug = vmw_gmrid_man_debug
43184 };
43185diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43186index 37881ec..319065d 100644
43187--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43188+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43189@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
43190 int ret;
43191
43192 num_clips = arg->num_clips;
43193- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43194+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43195
43196 if (unlikely(num_clips == 0))
43197 return 0;
43198@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
43199 int ret;
43200
43201 num_clips = arg->num_clips;
43202- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43203+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43204
43205 if (unlikely(num_clips == 0))
43206 return 0;
43207diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43208index 0c42376..6febe77 100644
43209--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43210+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43211@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
43212 * emitted. Then the fence is stale and signaled.
43213 */
43214
43215- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
43216+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
43217 > VMW_FENCE_WRAP);
43218
43219 return ret;
43220@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
43221
43222 if (fifo_idle)
43223 down_read(&fifo_state->rwsem);
43224- signal_seq = atomic_read(&dev_priv->marker_seq);
43225+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
43226 ret = 0;
43227
43228 for (;;) {
43229diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43230index 8a8725c2..afed796 100644
43231--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43232+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43233@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
43234 while (!vmw_lag_lt(queue, us)) {
43235 spin_lock(&queue->lock);
43236 if (list_empty(&queue->head))
43237- seqno = atomic_read(&dev_priv->marker_seq);
43238+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43239 else {
43240 marker = list_first_entry(&queue->head,
43241 struct vmw_marker, head);
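
The marker_seq conversions above are the first instance of a pattern that recurs throughout this patch. PaX's REFCOUNT hardening instruments atomic_t so that signed overflow traps, turning reference-count overflows from use-after-free primitives into a clean abort. Counters that are supposed to wrap -- fence sequence numbers here, transaction and device IDs in the hv and hid hunks below -- must opt out via atomic_unchecked_t and the matching *_unchecked accessors, or they would trip the detector in normal operation. Usage in brief, reusing the patch's own API:

static atomic_unchecked_t seq = ATOMIC_INIT(0);

static u32 next_seqno(void)
{
	/* Wrapping from 0xffffffff back to 0 is intentional for a sequence
	 * number, so the unchecked variant is used; a checked
	 * atomic_inc_return() would trap under PaX REFCOUNT on overflow. */
	return atomic_inc_return_unchecked(&seq);
}
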
43242diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
43243index 6866448..2ad2b34 100644
43244--- a/drivers/gpu/vga/vga_switcheroo.c
43245+++ b/drivers/gpu/vga/vga_switcheroo.c
43246@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
43247
43248 /* this version is for the case where the power switch is separate
43249 to the device being powered down. */
43250-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
43251+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
43252 {
43253 /* copy over all the bus versions */
43254 if (dev->bus && dev->bus->pm) {
43255@@ -689,7 +689,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
43256 return ret;
43257 }
43258
43259-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
43260+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
43261 {
43262 /* copy over all the bus versions */
43263 if (dev->bus && dev->bus->pm) {
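
dev_pm_domain_no_const is another grsecurity idiom. The CONSTIFY gcc plugin makes structures composed of function pointers implicitly const so that ops tables land in read-only memory; vga_switcheroo, however, must fill a caller-provided pm-domain table at runtime, so the parameter is retyped with a _no_const typedef that exempts this one use. The typical declaration shape (the __no_const attribute is supplied by the plugin support headers and expands to nothing without them):

#ifndef __no_const
#define __no_const	/* no-op when the constify plugin is absent */
#endif

typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
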
43264diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
43265index 1bdcccc..f745d2c 100644
43266--- a/drivers/hid/hid-cherry.c
43267+++ b/drivers/hid/hid-cherry.c
43268@@ -28,7 +28,7 @@
43269 static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
43270 unsigned int *rsize)
43271 {
43272- if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
43273+ if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
43274 hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
43275 rdesc[11] = rdesc[16] = 0xff;
43276 rdesc[12] = rdesc[17] = 0x03;
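
The hid-cherry change is the first of six report-descriptor fixup corrections in this patch (hid-kye, hid-lg, hid-monterey, hid-petalynx and hid-sunplus follow): the fixup writes rdesc[17], so the guard must require at least 18 bytes, not 17. The invariant behind every one of these hunks is simply that rdesc[i] is in bounds only when *rsize >= i + 1; each original check fell short by the gap between the highest index touched and the index tested. The corrected shape:

static __u8 *fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
{
	/* Highest index written below is 17, so 18 bytes are required. */
	if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
		rdesc[11] = rdesc[16] = 0xff;
		rdesc[12] = rdesc[17] = 0x03;
	}
	return rdesc;
}
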
43277diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
43278index 8ed66fd..38ff772 100644
43279--- a/drivers/hid/hid-core.c
43280+++ b/drivers/hid/hid-core.c
43281@@ -2488,7 +2488,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
43282
43283 int hid_add_device(struct hid_device *hdev)
43284 {
43285- static atomic_t id = ATOMIC_INIT(0);
43286+ static atomic_unchecked_t id = ATOMIC_INIT(0);
43287 int ret;
43288
43289 if (WARN_ON(hdev->status & HID_STAT_ADDED))
43290@@ -2530,7 +2530,7 @@ int hid_add_device(struct hid_device *hdev)
43291 /* XXX hack, any other cleaner solution after the driver core
43292 * is converted to allow more than 20 bytes as the device name? */
43293 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
43294- hdev->vendor, hdev->product, atomic_inc_return(&id));
43295+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
43296
43297 hid_debug_register(hdev, dev_name(&hdev->dev));
43298 ret = device_add(&hdev->dev);
43299diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
43300index e776963..b92bf01 100644
43301--- a/drivers/hid/hid-kye.c
43302+++ b/drivers/hid/hid-kye.c
43303@@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
43304 * - change the button usage range to 4-7 for the extra
43305 * buttons
43306 */
43307- if (*rsize >= 74 &&
43308+ if (*rsize >= 75 &&
43309 rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
43310 rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
43311 rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
43312diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
43313index a976f48..f91ff14 100644
43314--- a/drivers/hid/hid-lg.c
43315+++ b/drivers/hid/hid-lg.c
43316@@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
43317 struct usb_device_descriptor *udesc;
43318 __u16 bcdDevice, rev_maj, rev_min;
43319
43320- if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
43321+ if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
43322 rdesc[84] == 0x8c && rdesc[85] == 0x02) {
43323 hid_info(hdev,
43324 "fixing up Logitech keyboard report descriptor\n");
43325 rdesc[84] = rdesc[89] = 0x4d;
43326 rdesc[85] = rdesc[90] = 0x10;
43327 }
43328- if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
43329+ if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
43330 rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
43331 rdesc[49] == 0x81 && rdesc[50] == 0x06) {
43332 hid_info(hdev,
43333diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
43334index 486dbde..b7ba829 100644
43335--- a/drivers/hid/hid-logitech-dj.c
43336+++ b/drivers/hid/hid-logitech-dj.c
43337@@ -238,13 +238,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
43338 return;
43339 }
43340
43341- if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
43342- (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
43343- dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
43344- __func__, dj_report->device_index);
43345- return;
43346- }
43347-
43348 if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
43349 /* The device is already known. No need to reallocate it. */
43350 dbg_hid("%s: device is already known\n", __func__);
43351@@ -557,7 +550,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
43352 if (!out_buf)
43353 return -ENOMEM;
43354
43355- if (count < DJREPORT_SHORT_LENGTH - 2)
43356+ if (count > DJREPORT_SHORT_LENGTH - 2)
43357 count = DJREPORT_SHORT_LENGTH - 2;
43358
43359 out_buf[0] = REPORT_ID_DJ_SHORT;
43360@@ -690,6 +683,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
43361 * device (via hid_input_report() ) and return 1 so hid-core does not do
43362 * anything else with it.
43363 */
43364+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
43365+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
43366+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
43367+ __func__, dj_report->device_index);
43368+ return false;
43369+ }
43370
43371 spin_lock_irqsave(&djrcv_dev->lock, flags);
43372 if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
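
Two distinct fixes land in hid-logitech-dj.c. First, the device_index range check moves from logi_dj_recv_add_djhid_device() into logi_dj_raw_event(), so every incoming DJ report is validated before device_index is used to index paired_dj_devices[], not just reports on the add-device path. Second, the clamp in logi_dj_ll_raw_request() had its comparison inverted: to cap count at DJREPORT_SHORT_LENGTH - 2 the branch must fire when count is greater, not smaller. The corrected clamp in isolation:

/* The short DJ report carries DJREPORT_SHORT_LENGTH bytes, two of which
 * are the report id and device index, so cap the payload accordingly. */
if (count > DJREPORT_SHORT_LENGTH - 2)
	count = DJREPORT_SHORT_LENGTH - 2;
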
43373diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
43374index ecc2cbf..29a74c1 100644
43375--- a/drivers/hid/hid-magicmouse.c
43376+++ b/drivers/hid/hid-magicmouse.c
43377@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
43378 if (size < 4 || ((size - 4) % 9) != 0)
43379 return 0;
43380 npoints = (size - 4) / 9;
43381+ if (npoints > 15) {
43382+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
43383+ size);
43384+ return 0;
43385+ }
43386 msc->ntouches = 0;
43387 for (ii = 0; ii < npoints; ii++)
43388 magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
43389@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
43390 if (size < 6 || ((size - 6) % 8) != 0)
43391 return 0;
43392 npoints = (size - 6) / 8;
43393+ if (npoints > 15) {
43394+ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
43395+ size);
43396+ return 0;
43397+ }
43398 msc->ntouches = 0;
43399 for (ii = 0; ii < npoints; ii++)
43400 magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
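
The magicmouse hunks bound npoints before the touch loop runs. size is device-controlled and each touch record occupies 9 bytes (trackpad) or 8 bytes (mouse), while the driver keeps per-touch state in a fixed-size array; without the cap an oversized report would walk the loop past that array. The guard, in brief (the limit of 15 matches the driver's array size):

npoints = (size - 4) / 9;	/* one 9-byte record per reported touch */
if (npoints > 15) {		/* never exceed the fixed touch array */
	hid_warn(hdev, "invalid size value (%d)\n", size);
	return 0;		/* drop the malformed report */
}
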
43401diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
43402index 9e14c00..25daf28 100644
43403--- a/drivers/hid/hid-monterey.c
43404+++ b/drivers/hid/hid-monterey.c
43405@@ -24,7 +24,7 @@
43406 static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
43407 unsigned int *rsize)
43408 {
43409- if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
43410+ if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
43411 hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
43412 rdesc[30] = 0x0c;
43413 }
43414diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
43415index 736b250..6aca4f2 100644
43416--- a/drivers/hid/hid-petalynx.c
43417+++ b/drivers/hid/hid-petalynx.c
43418@@ -25,7 +25,7 @@
43419 static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
43420 unsigned int *rsize)
43421 {
43422- if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
43423+ if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
43424 rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
43425 rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
43426 hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
43427diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
43428index acbb0210..020df3c 100644
43429--- a/drivers/hid/hid-picolcd_core.c
43430+++ b/drivers/hid/hid-picolcd_core.c
43431@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
43432 if (!data)
43433 return 1;
43434
43435+ if (size > 64) {
43436+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
43437+ size);
43438+ return 0;
43439+ }
43440+
43441 if (report->id == REPORT_KEY_STATE) {
43442 if (data->input_keys)
43443 ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
43444diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
43445index 87fc91e..91072fa 100644
43446--- a/drivers/hid/hid-sunplus.c
43447+++ b/drivers/hid/hid-sunplus.c
43448@@ -24,7 +24,7 @@
43449 static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
43450 unsigned int *rsize)
43451 {
43452- if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
43453+ if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
43454 rdesc[106] == 0x03) {
43455 hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
43456 rdesc[105] = rdesc[110] = 0x03;
43457diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
43458index c13fb5b..55a3802 100644
43459--- a/drivers/hid/hid-wiimote-debug.c
43460+++ b/drivers/hid/hid-wiimote-debug.c
43461@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
43462 else if (size == 0)
43463 return -EIO;
43464
43465- if (copy_to_user(u, buf, size))
43466+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
43467 return -EFAULT;
43468
43469 *off += size;
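
The wiimote debugfs read gains a size > sizeof(buf) test in front of copy_to_user(). buf is a fixed on-stack buffer and size is returned by the preceding EEPROM read, so bounding it before the copy guarantees that a misbehaving lower layer can never cause stack memory beyond the buffer to be copied out to userspace. The pattern generically (fill_buffer is a hypothetical stand-in for the EEPROM read):

static ssize_t dbg_read(struct file *f, char __user *u, size_t want)
{
	char buf[16];
	ssize_t size = fill_buffer(buf, sizeof(buf), want);	/* hypothetical */

	if (size <= 0)
		return size;
	/* Refuse to copy more than the stack buffer actually holds. */
	if (size > sizeof(buf) || copy_to_user(u, buf, size))
		return -EFAULT;
	return size;
}
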
43470diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
43471index 0cb92e3..c7d453d 100644
43472--- a/drivers/hid/uhid.c
43473+++ b/drivers/hid/uhid.c
43474@@ -47,7 +47,7 @@ struct uhid_device {
43475 struct mutex report_lock;
43476 wait_queue_head_t report_wait;
43477 atomic_t report_done;
43478- atomic_t report_id;
43479+ atomic_unchecked_t report_id;
43480 struct uhid_event report_buf;
43481 };
43482
43483@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
43484
43485 spin_lock_irqsave(&uhid->qlock, flags);
43486 ev->type = UHID_FEATURE;
43487- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
43488+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
43489 ev->u.feature.rnum = rnum;
43490 ev->u.feature.rtype = report_type;
43491
43492@@ -538,7 +538,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
43493 spin_lock_irqsave(&uhid->qlock, flags);
43494
43495 /* id for old report; drop it silently */
43496- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
43497+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
43498 goto unlock;
43499 if (atomic_read(&uhid->report_done))
43500 goto unlock;
43501diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
43502index 284cf66..084c627 100644
43503--- a/drivers/hv/channel.c
43504+++ b/drivers/hv/channel.c
43505@@ -365,8 +365,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
43506 int ret = 0;
43507 int t;
43508
43509- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
43510- atomic_inc(&vmbus_connection.next_gpadl_handle);
43511+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
43512+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
43513
43514 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
43515 if (ret)
43516diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
43517index edfc848..d83e195 100644
43518--- a/drivers/hv/hv.c
43519+++ b/drivers/hv/hv.c
43520@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
43521 u64 output_address = (output) ? virt_to_phys(output) : 0;
43522 u32 output_address_hi = output_address >> 32;
43523 u32 output_address_lo = output_address & 0xFFFFFFFF;
43524- void *hypercall_page = hv_context.hypercall_page;
43525+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
43526
43527 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
43528 "=a"(hv_status_lo) : "d" (control_hi),
43529@@ -154,7 +154,7 @@ int hv_init(void)
43530 /* See if the hypercall page is already set */
43531 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
43532
43533- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
43534+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
43535
43536 if (!virtaddr)
43537 goto cleanup;
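
Two related tweaks in hv.c: the hypercall page is now mapped PAGE_KERNEL_RX rather than PAGE_KERNEL_EXEC, since PaX KERNEXEC enforces W^X -- kernel memory is never writable and executable at once -- and the indirect call through that page goes via ktva_ktla(), grsecurity's helper for translating between the read-only kernel text alias and its writable mirror. The allocation change on its own:

/* Read + execute only: KERNEXEC forbids writable+executable mappings. */
virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
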
43538diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
43539index 5e90c5d..d8fcefb 100644
43540--- a/drivers/hv/hv_balloon.c
43541+++ b/drivers/hv/hv_balloon.c
43542@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
43543
43544 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
43545 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
43546-static atomic_t trans_id = ATOMIC_INIT(0);
43547+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
43548
43549 static int dm_ring_size = (5 * PAGE_SIZE);
43550
43551@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
43552 pr_info("Memory hot add failed\n");
43553
43554 dm->state = DM_INITIALIZED;
43555- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43556+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43557 vmbus_sendpacket(dm->dev->channel, &resp,
43558 sizeof(struct dm_hot_add_response),
43559 (unsigned long)NULL,
43560@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
43561 memset(&status, 0, sizeof(struct dm_status));
43562 status.hdr.type = DM_STATUS_REPORT;
43563 status.hdr.size = sizeof(struct dm_status);
43564- status.hdr.trans_id = atomic_inc_return(&trans_id);
43565+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43566
43567 /*
43568 * The host expects the guest to report free memory.
43569@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
43570 * send the status. This can happen if we were interrupted
43571 * after we picked our transaction ID.
43572 */
43573- if (status.hdr.trans_id != atomic_read(&trans_id))
43574+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
43575 return;
43576
43577 /*
43578@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
43579 */
43580
43581 do {
43582- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
43583+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43584 ret = vmbus_sendpacket(dm_device.dev->channel,
43585 bl_resp,
43586 bl_resp->hdr.size,
43587@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
43588
43589 memset(&resp, 0, sizeof(struct dm_unballoon_response));
43590 resp.hdr.type = DM_UNBALLOON_RESPONSE;
43591- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43592+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43593 resp.hdr.size = sizeof(struct dm_unballoon_response);
43594
43595 vmbus_sendpacket(dm_device.dev->channel, &resp,
43596@@ -1239,7 +1239,7 @@ static void version_resp(struct hv_dynmem_device *dm,
43597 memset(&version_req, 0, sizeof(struct dm_version_request));
43598 version_req.hdr.type = DM_VERSION_REQUEST;
43599 version_req.hdr.size = sizeof(struct dm_version_request);
43600- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43601+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43602 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
43603 version_req.is_last_attempt = 1;
43604
43605@@ -1409,7 +1409,7 @@ static int balloon_probe(struct hv_device *dev,
43606 memset(&version_req, 0, sizeof(struct dm_version_request));
43607 version_req.hdr.type = DM_VERSION_REQUEST;
43608 version_req.hdr.size = sizeof(struct dm_version_request);
43609- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43610+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43611 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
43612 version_req.is_last_attempt = 0;
43613
43614@@ -1440,7 +1440,7 @@ static int balloon_probe(struct hv_device *dev,
43615 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
43616 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
43617 cap_msg.hdr.size = sizeof(struct dm_capabilities);
43618- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
43619+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43620
43621 cap_msg.caps.cap_bits.balloon = 1;
43622 cap_msg.caps.cap_bits.hot_add = 1;
43623diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
43624index 22b7507..fc2fc47 100644
43625--- a/drivers/hv/hyperv_vmbus.h
43626+++ b/drivers/hv/hyperv_vmbus.h
43627@@ -607,7 +607,7 @@ enum vmbus_connect_state {
43628 struct vmbus_connection {
43629 enum vmbus_connect_state conn_state;
43630
43631- atomic_t next_gpadl_handle;
43632+ atomic_unchecked_t next_gpadl_handle;
43633
43634 /*
43635 * Represents channel interrupts. Each bit position represents a
43636diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
43637index 4d6b269..2e23b86 100644
43638--- a/drivers/hv/vmbus_drv.c
43639+++ b/drivers/hv/vmbus_drv.c
43640@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
43641 {
43642 int ret = 0;
43643
43644- static atomic_t device_num = ATOMIC_INIT(0);
43645+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
43646
43647 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
43648- atomic_inc_return(&device_num));
43649+ atomic_inc_return_unchecked(&device_num));
43650
43651 child_device_obj->device.bus = &hv_bus;
43652 child_device_obj->device.parent = &hv_acpi_dev->dev;
43653diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
43654index 579bdf9..75118b5 100644
43655--- a/drivers/hwmon/acpi_power_meter.c
43656+++ b/drivers/hwmon/acpi_power_meter.c
43657@@ -116,7 +116,7 @@ struct sensor_template {
43658 struct device_attribute *devattr,
43659 const char *buf, size_t count);
43660 int index;
43661-};
43662+} __do_const;
43663
43664 /* Averaging interval */
43665 static int update_avg_interval(struct acpi_power_meter_resource *resource)
43666@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
43667 struct sensor_template *attrs)
43668 {
43669 struct device *dev = &resource->acpi_dev->dev;
43670- struct sensor_device_attribute *sensors =
43671+ sensor_device_attribute_no_const *sensors =
43672 &resource->sensors[resource->num_sensors];
43673 int res = 0;
43674
43675diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
43676index 3288f13..71cfb4e 100644
43677--- a/drivers/hwmon/applesmc.c
43678+++ b/drivers/hwmon/applesmc.c
43679@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
43680 {
43681 struct applesmc_node_group *grp;
43682 struct applesmc_dev_attr *node;
43683- struct attribute *attr;
43684+ attribute_no_const *attr;
43685 int ret, i;
43686
43687 for (grp = groups; grp->format; grp++) {
43688diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
43689index ae208f6..48b6c5b 100644
43690--- a/drivers/hwmon/asus_atk0110.c
43691+++ b/drivers/hwmon/asus_atk0110.c
43692@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
43693 struct atk_sensor_data {
43694 struct list_head list;
43695 struct atk_data *data;
43696- struct device_attribute label_attr;
43697- struct device_attribute input_attr;
43698- struct device_attribute limit1_attr;
43699- struct device_attribute limit2_attr;
43700+ device_attribute_no_const label_attr;
43701+ device_attribute_no_const input_attr;
43702+ device_attribute_no_const limit1_attr;
43703+ device_attribute_no_const limit2_attr;
43704 char label_attr_name[ATTR_NAME_SIZE];
43705 char input_attr_name[ATTR_NAME_SIZE];
43706 char limit1_attr_name[ATTR_NAME_SIZE];
43707@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
43708 static struct device_attribute atk_name_attr =
43709 __ATTR(name, 0444, atk_name_show, NULL);
43710
43711-static void atk_init_attribute(struct device_attribute *attr, char *name,
43712+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
43713 sysfs_show_func show)
43714 {
43715 sysfs_attr_init(&attr->attr);
43716diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
43717index d76f0b7..55ae976 100644
43718--- a/drivers/hwmon/coretemp.c
43719+++ b/drivers/hwmon/coretemp.c
43720@@ -784,7 +784,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
43721 return NOTIFY_OK;
43722 }
43723
43724-static struct notifier_block coretemp_cpu_notifier __refdata = {
43725+static struct notifier_block coretemp_cpu_notifier = {
43726 .notifier_call = coretemp_cpu_callback,
43727 };
43728
43729diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
43730index 632f1dc..57e6a58 100644
43731--- a/drivers/hwmon/ibmaem.c
43732+++ b/drivers/hwmon/ibmaem.c
43733@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
43734 struct aem_rw_sensor_template *rw)
43735 {
43736 struct device *dev = &data->pdev->dev;
43737- struct sensor_device_attribute *sensors = data->sensors;
43738+ sensor_device_attribute_no_const *sensors = data->sensors;
43739 int err;
43740
43741 /* Set up read-only sensors */
43742diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
43743index 14c82da..09b25d7 100644
43744--- a/drivers/hwmon/iio_hwmon.c
43745+++ b/drivers/hwmon/iio_hwmon.c
43746@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
43747 {
43748 struct device *dev = &pdev->dev;
43749 struct iio_hwmon_state *st;
43750- struct sensor_device_attribute *a;
43751+ sensor_device_attribute_no_const *a;
43752 int ret, i;
43753 int in_i = 1, temp_i = 1, curr_i = 1;
43754 enum iio_chan_type type;
43755diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
43756index 7710f46..427a28d 100644
43757--- a/drivers/hwmon/nct6683.c
43758+++ b/drivers/hwmon/nct6683.c
43759@@ -397,11 +397,11 @@ static struct attribute_group *
43760 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43761 int repeat)
43762 {
43763- struct sensor_device_attribute_2 *a2;
43764- struct sensor_device_attribute *a;
43765+ sensor_device_attribute_2_no_const *a2;
43766+ sensor_device_attribute_no_const *a;
43767 struct sensor_device_template **t;
43768 struct sensor_device_attr_u *su;
43769- struct attribute_group *group;
43770+ attribute_group_no_const *group;
43771 struct attribute **attrs;
43772 int i, j, count;
43773
43774diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
43775index 59d9a3f..2298fa4 100644
43776--- a/drivers/hwmon/nct6775.c
43777+++ b/drivers/hwmon/nct6775.c
43778@@ -944,10 +944,10 @@ static struct attribute_group *
43779 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43780 int repeat)
43781 {
43782- struct attribute_group *group;
43783+ attribute_group_no_const *group;
43784 struct sensor_device_attr_u *su;
43785- struct sensor_device_attribute *a;
43786- struct sensor_device_attribute_2 *a2;
43787+ sensor_device_attribute_no_const *a;
43788+ sensor_device_attribute_2_no_const *a2;
43789 struct attribute **attrs;
43790 struct sensor_device_template **t;
43791 int i, count;
43792diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
43793index 291d11f..3f0dbbd 100644
43794--- a/drivers/hwmon/pmbus/pmbus_core.c
43795+++ b/drivers/hwmon/pmbus/pmbus_core.c
43796@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
43797 return 0;
43798 }
43799
43800-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43801+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
43802 const char *name,
43803 umode_t mode,
43804 ssize_t (*show)(struct device *dev,
43805@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43806 dev_attr->store = store;
43807 }
43808
43809-static void pmbus_attr_init(struct sensor_device_attribute *a,
43810+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
43811 const char *name,
43812 umode_t mode,
43813 ssize_t (*show)(struct device *dev,
43814@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
43815 u16 reg, u8 mask)
43816 {
43817 struct pmbus_boolean *boolean;
43818- struct sensor_device_attribute *a;
43819+ sensor_device_attribute_no_const *a;
43820
43821 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
43822 if (!boolean)
43823@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
43824 bool update, bool readonly)
43825 {
43826 struct pmbus_sensor *sensor;
43827- struct device_attribute *a;
43828+ device_attribute_no_const *a;
43829
43830 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
43831 if (!sensor)
43832@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
43833 const char *lstring, int index)
43834 {
43835 struct pmbus_label *label;
43836- struct device_attribute *a;
43837+ device_attribute_no_const *a;
43838
43839 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
43840 if (!label)
43841diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
43842index 97cd45a..ac54d8b 100644
43843--- a/drivers/hwmon/sht15.c
43844+++ b/drivers/hwmon/sht15.c
43845@@ -169,7 +169,7 @@ struct sht15_data {
43846 int supply_uv;
43847 bool supply_uv_valid;
43848 struct work_struct update_supply_work;
43849- atomic_t interrupt_handled;
43850+ atomic_unchecked_t interrupt_handled;
43851 };
43852
43853 /**
43854@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
43855 ret = gpio_direction_input(data->pdata->gpio_data);
43856 if (ret)
43857 return ret;
43858- atomic_set(&data->interrupt_handled, 0);
43859+ atomic_set_unchecked(&data->interrupt_handled, 0);
43860
43861 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43862 if (gpio_get_value(data->pdata->gpio_data) == 0) {
43863 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
43864 /* Only relevant if the interrupt hasn't occurred. */
43865- if (!atomic_read(&data->interrupt_handled))
43866+ if (!atomic_read_unchecked(&data->interrupt_handled))
43867 schedule_work(&data->read_work);
43868 }
43869 ret = wait_event_timeout(data->wait_queue,
43870@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
43871
43872 /* First disable the interrupt */
43873 disable_irq_nosync(irq);
43874- atomic_inc(&data->interrupt_handled);
43875+ atomic_inc_unchecked(&data->interrupt_handled);
43876 /* Then schedule a reading work struct */
43877 if (data->state != SHT15_READING_NOTHING)
43878 schedule_work(&data->read_work);
43879@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
43880 * If not, then start the interrupt again - care here as could
43881 * have gone low in meantime so verify it hasn't!
43882 */
43883- atomic_set(&data->interrupt_handled, 0);
43884+ atomic_set_unchecked(&data->interrupt_handled, 0);
43885 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43886 /* If still not occurred or another handler was scheduled */
43887 if (gpio_get_value(data->pdata->gpio_data)
43888- || atomic_read(&data->interrupt_handled))
43889+ || atomic_read_unchecked(&data->interrupt_handled))
43890 return;
43891 }
43892
43893diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43894index 8df43c5..b07b91d 100644
43895--- a/drivers/hwmon/via-cputemp.c
43896+++ b/drivers/hwmon/via-cputemp.c
43897@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43898 return NOTIFY_OK;
43899 }
43900
43901-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43902+static struct notifier_block via_cputemp_cpu_notifier = {
43903 .notifier_call = via_cputemp_cpu_callback,
43904 };
43905
43906diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43907index 41fc683..a39cfea 100644
43908--- a/drivers/i2c/busses/i2c-amd756-s4882.c
43909+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43910@@ -43,7 +43,7 @@
43911 extern struct i2c_adapter amd756_smbus;
43912
43913 static struct i2c_adapter *s4882_adapter;
43914-static struct i2c_algorithm *s4882_algo;
43915+static i2c_algorithm_no_const *s4882_algo;
43916
43917 /* Wrapper access functions for multiplexed SMBus */
43918 static DEFINE_MUTEX(amd756_lock);
43919diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43920index b19a310..d6eece0 100644
43921--- a/drivers/i2c/busses/i2c-diolan-u2c.c
43922+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43923@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43924 /* usb layer */
43925
43926 /* Send command to device, and get response. */
43927-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43928+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43929 {
43930 int ret = 0;
43931 int actual;
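
__intentional_overflow(-1), seen here on diolan_usb_transfer() and again on several mthca helpers below, is consumed by PaX's size_overflow gcc plugin. The plugin instruments integer expressions feeding allocation sizes and copy lengths and traps on overflow; the annotation marks functions whose return-value arithmetic is allowed to overflow, exempting them from instrumentation. Declaration shape, hedged (the macro comes from the plugin support headers and is empty without them):

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* no-op without the plugin */
#endif

struct i2c_diolan_u2c;	/* opaque here */
static int __intentional_overflow(-1)
diolan_usb_transfer(struct i2c_diolan_u2c *dev);
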
43932diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43933index b170bdf..3c76427 100644
43934--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43935+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43936@@ -41,7 +41,7 @@
43937 extern struct i2c_adapter *nforce2_smbus;
43938
43939 static struct i2c_adapter *s4985_adapter;
43940-static struct i2c_algorithm *s4985_algo;
43941+static i2c_algorithm_no_const *s4985_algo;
43942
43943 /* Wrapper access functions for multiplexed SMBus */
43944 static DEFINE_MUTEX(nforce2_lock);
43945diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43946index 80b47e8..1a6040d9 100644
43947--- a/drivers/i2c/i2c-dev.c
43948+++ b/drivers/i2c/i2c-dev.c
43949@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43950 break;
43951 }
43952
43953- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43954+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43955 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43956 if (IS_ERR(rdwr_pa[i].buf)) {
43957 res = PTR_ERR(rdwr_pa[i].buf);
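
The i2c-dev hunk retags the saved buffer pointer with __force_user: under grsecurity's stricter kernel/user address-space separation the plain __user cast no longer type-checks cleanly, and __force_user documents a deliberate crossing for the checker. The surrounding code is the stock memdup_user() idiom for pulling a variable-length user buffer into the kernel:

/* memdup_user() = kmalloc(len) + copy_from_user(); error pointer on failure. */
void *kbuf = memdup_user(ubuf, len);
if (IS_ERR(kbuf))
	return PTR_ERR(kbuf);
/* ... use kbuf ... */
kfree(kbuf);
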
43958diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43959index 0b510ba..4fbb5085 100644
43960--- a/drivers/ide/ide-cd.c
43961+++ b/drivers/ide/ide-cd.c
43962@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43963 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43964 if ((unsigned long)buf & alignment
43965 || blk_rq_bytes(rq) & q->dma_pad_mask
43966- || object_is_on_stack(buf))
43967+ || object_starts_on_stack(buf))
43968 drive->dma = 0;
43969 }
43970 }
43971diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43972index 4b1f375..770b95f 100644
43973--- a/drivers/iio/industrialio-core.c
43974+++ b/drivers/iio/industrialio-core.c
43975@@ -551,7 +551,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43976 }
43977
43978 static
43979-int __iio_device_attr_init(struct device_attribute *dev_attr,
43980+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43981 const char *postfix,
43982 struct iio_chan_spec const *chan,
43983 ssize_t (*readfunc)(struct device *dev,
43984diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43985index c323917..6ddea8b 100644
43986--- a/drivers/infiniband/core/cm.c
43987+++ b/drivers/infiniband/core/cm.c
43988@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43989
43990 struct cm_counter_group {
43991 struct kobject obj;
43992- atomic_long_t counter[CM_ATTR_COUNT];
43993+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43994 };
43995
43996 struct cm_counter_attribute {
43997@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43998 struct ib_mad_send_buf *msg = NULL;
43999 int ret;
44000
44001- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44002+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44003 counter[CM_REQ_COUNTER]);
44004
44005 /* Quick state check to discard duplicate REQs. */
44006@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
44007 if (!cm_id_priv)
44008 return;
44009
44010- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44011+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44012 counter[CM_REP_COUNTER]);
44013 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
44014 if (ret)
44015@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
44016 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
44017 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
44018 spin_unlock_irq(&cm_id_priv->lock);
44019- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44020+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44021 counter[CM_RTU_COUNTER]);
44022 goto out;
44023 }
44024@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
44025 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
44026 dreq_msg->local_comm_id);
44027 if (!cm_id_priv) {
44028- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44029+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44030 counter[CM_DREQ_COUNTER]);
44031 cm_issue_drep(work->port, work->mad_recv_wc);
44032 return -EINVAL;
44033@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
44034 case IB_CM_MRA_REP_RCVD:
44035 break;
44036 case IB_CM_TIMEWAIT:
44037- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44038+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44039 counter[CM_DREQ_COUNTER]);
44040 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
44041 goto unlock;
44042@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
44043 cm_free_msg(msg);
44044 goto deref;
44045 case IB_CM_DREQ_RCVD:
44046- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44047+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44048 counter[CM_DREQ_COUNTER]);
44049 goto unlock;
44050 default:
44051@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
44052 ib_modify_mad(cm_id_priv->av.port->mad_agent,
44053 cm_id_priv->msg, timeout)) {
44054 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
44055- atomic_long_inc(&work->port->
44056+ atomic_long_inc_unchecked(&work->port->
44057 counter_group[CM_RECV_DUPLICATES].
44058 counter[CM_MRA_COUNTER]);
44059 goto out;
44060@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
44061 break;
44062 case IB_CM_MRA_REQ_RCVD:
44063 case IB_CM_MRA_REP_RCVD:
44064- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44065+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44066 counter[CM_MRA_COUNTER]);
44067 /* fall through */
44068 default:
44069@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
44070 case IB_CM_LAP_IDLE:
44071 break;
44072 case IB_CM_MRA_LAP_SENT:
44073- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44074+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44075 counter[CM_LAP_COUNTER]);
44076 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
44077 goto unlock;
44078@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
44079 cm_free_msg(msg);
44080 goto deref;
44081 case IB_CM_LAP_RCVD:
44082- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44083+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44084 counter[CM_LAP_COUNTER]);
44085 goto unlock;
44086 default:
44087@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
44088 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
44089 if (cur_cm_id_priv) {
44090 spin_unlock_irq(&cm.lock);
44091- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44092+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44093 counter[CM_SIDR_REQ_COUNTER]);
44094 goto out; /* Duplicate message. */
44095 }
44096@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
44097 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
44098 msg->retries = 1;
44099
44100- atomic_long_add(1 + msg->retries,
44101+ atomic_long_add_unchecked(1 + msg->retries,
44102 &port->counter_group[CM_XMIT].counter[attr_index]);
44103 if (msg->retries)
44104- atomic_long_add(msg->retries,
44105+ atomic_long_add_unchecked(msg->retries,
44106 &port->counter_group[CM_XMIT_RETRIES].
44107 counter[attr_index]);
44108
44109@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
44110 }
44111
44112 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
44113- atomic_long_inc(&port->counter_group[CM_RECV].
44114+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
44115 counter[attr_id - CM_ATTR_ID_OFFSET]);
44116
44117 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
44118@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
44119 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
44120
44121 return sprintf(buf, "%ld\n",
44122- atomic_long_read(&group->counter[cm_attr->index]));
44123+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
44124 }
44125
44126 static const struct sysfs_ops cm_counter_ops = {
44127diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
44128index 9f5ad7c..588cd84 100644
44129--- a/drivers/infiniband/core/fmr_pool.c
44130+++ b/drivers/infiniband/core/fmr_pool.c
44131@@ -98,8 +98,8 @@ struct ib_fmr_pool {
44132
44133 struct task_struct *thread;
44134
44135- atomic_t req_ser;
44136- atomic_t flush_ser;
44137+ atomic_unchecked_t req_ser;
44138+ atomic_unchecked_t flush_ser;
44139
44140 wait_queue_head_t force_wait;
44141 };
44142@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
44143 struct ib_fmr_pool *pool = pool_ptr;
44144
44145 do {
44146- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
44147+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
44148 ib_fmr_batch_release(pool);
44149
44150- atomic_inc(&pool->flush_ser);
44151+ atomic_inc_unchecked(&pool->flush_ser);
44152 wake_up_interruptible(&pool->force_wait);
44153
44154 if (pool->flush_function)
44155@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
44156 }
44157
44158 set_current_state(TASK_INTERRUPTIBLE);
44159- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
44160+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
44161 !kthread_should_stop())
44162 schedule();
44163 __set_current_state(TASK_RUNNING);
44164@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
44165 pool->dirty_watermark = params->dirty_watermark;
44166 pool->dirty_len = 0;
44167 spin_lock_init(&pool->pool_lock);
44168- atomic_set(&pool->req_ser, 0);
44169- atomic_set(&pool->flush_ser, 0);
44170+ atomic_set_unchecked(&pool->req_ser, 0);
44171+ atomic_set_unchecked(&pool->flush_ser, 0);
44172 init_waitqueue_head(&pool->force_wait);
44173
44174 pool->thread = kthread_run(ib_fmr_cleanup_thread,
44175@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
44176 }
44177 spin_unlock_irq(&pool->pool_lock);
44178
44179- serial = atomic_inc_return(&pool->req_ser);
44180+ serial = atomic_inc_return_unchecked(&pool->req_ser);
44181 wake_up_process(pool->thread);
44182
44183 if (wait_event_interruptible(pool->force_wait,
44184- atomic_read(&pool->flush_ser) - serial >= 0))
44185+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
44186 return -EINTR;
44187
44188 return 0;
44189@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
44190 } else {
44191 list_add_tail(&fmr->list, &pool->dirty_list);
44192 if (++pool->dirty_len >= pool->dirty_watermark) {
44193- atomic_inc(&pool->req_ser);
44194+ atomic_inc_unchecked(&pool->req_ser);
44195 wake_up_process(pool->thread);
44196 }
44197 }
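
The fmr_pool counters show why the unchecked atomics exist at all: req_ser and flush_ser are free-running serial numbers, and the progress tests above (flush_ser - req_ser < 0) depend on wraparound subtraction staying meaningful after the counters roll over -- precisely the arithmetic REFCOUNT would trap on a checked atomic_t. The comparison idiom on its own (well-defined in the kernel, which builds with -fno-strict-overflow):

/* True when serial 'a' is behind 'b'; correct across wraparound as long
 * as the two never drift more than 2^31 apart. */
static inline bool serial_before(int a, int b)
{
	return a - b < 0;
}
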
44198diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
44199index ec7a298..8742e59 100644
44200--- a/drivers/infiniband/hw/cxgb4/mem.c
44201+++ b/drivers/infiniband/hw/cxgb4/mem.c
44202@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44203 int err;
44204 struct fw_ri_tpte tpt;
44205 u32 stag_idx;
44206- static atomic_t key;
44207+ static atomic_unchecked_t key;
44208
44209 if (c4iw_fatal_error(rdev))
44210 return -EIO;
44211@@ -270,7 +270,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44212 if (rdev->stats.stag.cur > rdev->stats.stag.max)
44213 rdev->stats.stag.max = rdev->stats.stag.cur;
44214 mutex_unlock(&rdev->stats.lock);
44215- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
44216+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
44217 }
44218 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
44219 __func__, stag_state, type, pdid, stag_idx);
44220diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
44221index 79b3dbc..96e5fcc 100644
44222--- a/drivers/infiniband/hw/ipath/ipath_rc.c
44223+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
44224@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44225 struct ib_atomic_eth *ateth;
44226 struct ipath_ack_entry *e;
44227 u64 vaddr;
44228- atomic64_t *maddr;
44229+ atomic64_unchecked_t *maddr;
44230 u64 sdata;
44231 u32 rkey;
44232 u8 next;
44233@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44234 IB_ACCESS_REMOTE_ATOMIC)))
44235 goto nack_acc_unlck;
44236 /* Perform atomic OP and save result. */
44237- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44238+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44239 sdata = be64_to_cpu(ateth->swap_data);
44240 e = &qp->s_ack_queue[qp->r_head_ack_queue];
44241 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
44242- (u64) atomic64_add_return(sdata, maddr) - sdata :
44243+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44244 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44245 be64_to_cpu(ateth->compare_data),
44246 sdata);
44247diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
44248index 1f95bba..9530f87 100644
44249--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
44250+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
44251@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
44252 unsigned long flags;
44253 struct ib_wc wc;
44254 u64 sdata;
44255- atomic64_t *maddr;
44256+ atomic64_unchecked_t *maddr;
44257 enum ib_wc_status send_status;
44258
44259 /*
44260@@ -382,11 +382,11 @@ again:
44261 IB_ACCESS_REMOTE_ATOMIC)))
44262 goto acc_err;
44263 /* Perform atomic OP and save result. */
44264- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44265+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44266 sdata = wqe->wr.wr.atomic.compare_add;
44267 *(u64 *) sqp->s_sge.sge.vaddr =
44268 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
44269- (u64) atomic64_add_return(sdata, maddr) - sdata :
44270+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44271 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44272 sdata, wqe->wr.wr.atomic.swap);
44273 goto send_comp;
44274diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
44275index 287ad05..5ae7b44d 100644
44276--- a/drivers/infiniband/hw/mlx4/mad.c
44277+++ b/drivers/infiniband/hw/mlx4/mad.c
44278@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
44279
44280 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
44281 {
44282- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
44283+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
44284 cpu_to_be64(0xff00000000000000LL);
44285 }
44286
44287diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
44288index ed327e6..ca1739e0 100644
44289--- a/drivers/infiniband/hw/mlx4/mcg.c
44290+++ b/drivers/infiniband/hw/mlx4/mcg.c
44291@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
44292 {
44293 char name[20];
44294
44295- atomic_set(&ctx->tid, 0);
44296+ atomic_set_unchecked(&ctx->tid, 0);
44297 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
44298 ctx->mcg_wq = create_singlethread_workqueue(name);
44299 if (!ctx->mcg_wq)
44300diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44301index 369da3c..223e6e9 100644
44302--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
44303+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44304@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
44305 struct list_head mcg_mgid0_list;
44306 struct workqueue_struct *mcg_wq;
44307 struct mlx4_ib_demux_pv_ctx **tun;
44308- atomic_t tid;
44309+ atomic_unchecked_t tid;
44310 int flushing; /* flushing the work queue */
44311 };
44312
44313diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
44314index 9d3e5c1..6f166df 100644
44315--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
44316+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
44317@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
44318 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
44319 }
44320
44321-int mthca_QUERY_FW(struct mthca_dev *dev)
44322+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
44323 {
44324 struct mthca_mailbox *mailbox;
44325 u32 *outbox;
44326@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44327 CMD_TIME_CLASS_B);
44328 }
44329
44330-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44331+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44332 int num_mtt)
44333 {
44334 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
44335@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
44336 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
44337 }
44338
44339-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44340+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44341 int eq_num)
44342 {
44343 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
44344@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
44345 CMD_TIME_CLASS_B);
44346 }
44347
44348-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44349+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44350 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
44351 void *in_mad, void *response_mad)
44352 {
44353diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
44354index ded76c1..0cf0a08 100644
44355--- a/drivers/infiniband/hw/mthca/mthca_main.c
44356+++ b/drivers/infiniband/hw/mthca/mthca_main.c
44357@@ -692,7 +692,7 @@ err_close:
44358 return err;
44359 }
44360
44361-static int mthca_setup_hca(struct mthca_dev *dev)
44362+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
44363 {
44364 int err;
44365
44366diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
44367index ed9a989..6aa5dc2 100644
44368--- a/drivers/infiniband/hw/mthca/mthca_mr.c
44369+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
44370@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
44371 * through the bitmaps)
44372 */
44373
44374-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44375+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44376 {
44377 int o;
44378 int m;
44379@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
44380 return key;
44381 }
44382
44383-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44384+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44385 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
44386 {
44387 struct mthca_mailbox *mailbox;
44388@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
44389 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
44390 }
44391
44392-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44393+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44394 u64 *buffer_list, int buffer_size_shift,
44395 int list_len, u64 iova, u64 total_size,
44396 u32 access, struct mthca_mr *mr)
44397diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
44398index 415f8e1..e34214e 100644
44399--- a/drivers/infiniband/hw/mthca/mthca_provider.c
44400+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
44401@@ -764,7 +764,7 @@ unlock:
44402 return 0;
44403 }
44404
44405-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44406+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44407 {
44408 struct mthca_dev *dev = to_mdev(ibcq->device);
44409 struct mthca_cq *cq = to_mcq(ibcq);
44410diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
44411index 3b2a6dc..bce26ff 100644
44412--- a/drivers/infiniband/hw/nes/nes.c
44413+++ b/drivers/infiniband/hw/nes/nes.c
44414@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
44415 LIST_HEAD(nes_adapter_list);
44416 static LIST_HEAD(nes_dev_list);
44417
44418-atomic_t qps_destroyed;
44419+atomic_unchecked_t qps_destroyed;
44420
44421 static unsigned int ee_flsh_adapter;
44422 static unsigned int sysfs_nonidx_addr;
44423@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
44424 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
44425 struct nes_adapter *nesadapter = nesdev->nesadapter;
44426
44427- atomic_inc(&qps_destroyed);
44428+ atomic_inc_unchecked(&qps_destroyed);
44429
44430 /* Free the control structures */
44431
44432diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
44433index bd9d132..70d84f4 100644
44434--- a/drivers/infiniband/hw/nes/nes.h
44435+++ b/drivers/infiniband/hw/nes/nes.h
44436@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
44437 extern unsigned int wqm_quanta;
44438 extern struct list_head nes_adapter_list;
44439
44440-extern atomic_t cm_connects;
44441-extern atomic_t cm_accepts;
44442-extern atomic_t cm_disconnects;
44443-extern atomic_t cm_closes;
44444-extern atomic_t cm_connecteds;
44445-extern atomic_t cm_connect_reqs;
44446-extern atomic_t cm_rejects;
44447-extern atomic_t mod_qp_timouts;
44448-extern atomic_t qps_created;
44449-extern atomic_t qps_destroyed;
44450-extern atomic_t sw_qps_destroyed;
44451+extern atomic_unchecked_t cm_connects;
44452+extern atomic_unchecked_t cm_accepts;
44453+extern atomic_unchecked_t cm_disconnects;
44454+extern atomic_unchecked_t cm_closes;
44455+extern atomic_unchecked_t cm_connecteds;
44456+extern atomic_unchecked_t cm_connect_reqs;
44457+extern atomic_unchecked_t cm_rejects;
44458+extern atomic_unchecked_t mod_qp_timouts;
44459+extern atomic_unchecked_t qps_created;
44460+extern atomic_unchecked_t qps_destroyed;
44461+extern atomic_unchecked_t sw_qps_destroyed;
44462 extern u32 mh_detected;
44463 extern u32 mh_pauses_sent;
44464 extern u32 cm_packets_sent;
44465@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
44466 extern u32 cm_packets_received;
44467 extern u32 cm_packets_dropped;
44468 extern u32 cm_packets_retrans;
44469-extern atomic_t cm_listens_created;
44470-extern atomic_t cm_listens_destroyed;
44471+extern atomic_unchecked_t cm_listens_created;
44472+extern atomic_unchecked_t cm_listens_destroyed;
44473 extern u32 cm_backlog_drops;
44474-extern atomic_t cm_loopbacks;
44475-extern atomic_t cm_nodes_created;
44476-extern atomic_t cm_nodes_destroyed;
44477-extern atomic_t cm_accel_dropped_pkts;
44478-extern atomic_t cm_resets_recvd;
44479-extern atomic_t pau_qps_created;
44480-extern atomic_t pau_qps_destroyed;
44481+extern atomic_unchecked_t cm_loopbacks;
44482+extern atomic_unchecked_t cm_nodes_created;
44483+extern atomic_unchecked_t cm_nodes_destroyed;
44484+extern atomic_unchecked_t cm_accel_dropped_pkts;
44485+extern atomic_unchecked_t cm_resets_recvd;
44486+extern atomic_unchecked_t pau_qps_created;
44487+extern atomic_unchecked_t pau_qps_destroyed;
44488
44489 extern u32 int_mod_timer_init;
44490 extern u32 int_mod_cq_depth_256;
44491diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
44492index 6f09a72..cf4399d 100644
44493--- a/drivers/infiniband/hw/nes/nes_cm.c
44494+++ b/drivers/infiniband/hw/nes/nes_cm.c
44495@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
44496 u32 cm_packets_retrans;
44497 u32 cm_packets_created;
44498 u32 cm_packets_received;
44499-atomic_t cm_listens_created;
44500-atomic_t cm_listens_destroyed;
44501+atomic_unchecked_t cm_listens_created;
44502+atomic_unchecked_t cm_listens_destroyed;
44503 u32 cm_backlog_drops;
44504-atomic_t cm_loopbacks;
44505-atomic_t cm_nodes_created;
44506-atomic_t cm_nodes_destroyed;
44507-atomic_t cm_accel_dropped_pkts;
44508-atomic_t cm_resets_recvd;
44509+atomic_unchecked_t cm_loopbacks;
44510+atomic_unchecked_t cm_nodes_created;
44511+atomic_unchecked_t cm_nodes_destroyed;
44512+atomic_unchecked_t cm_accel_dropped_pkts;
44513+atomic_unchecked_t cm_resets_recvd;
44514
44515 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
44516 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
44517@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
44518 /* instance of function pointers for client API */
44519 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
44520 static struct nes_cm_ops nes_cm_api = {
44521- mini_cm_accelerated,
44522- mini_cm_listen,
44523- mini_cm_del_listen,
44524- mini_cm_connect,
44525- mini_cm_close,
44526- mini_cm_accept,
44527- mini_cm_reject,
44528- mini_cm_recv_pkt,
44529- mini_cm_dealloc_core,
44530- mini_cm_get,
44531- mini_cm_set
44532+ .accelerated = mini_cm_accelerated,
44533+ .listen = mini_cm_listen,
44534+ .stop_listener = mini_cm_del_listen,
44535+ .connect = mini_cm_connect,
44536+ .close = mini_cm_close,
44537+ .accept = mini_cm_accept,
44538+ .reject = mini_cm_reject,
44539+ .recv_pkt = mini_cm_recv_pkt,
44540+ .destroy_cm_core = mini_cm_dealloc_core,
44541+ .get = mini_cm_get,
44542+ .set = mini_cm_set
44543 };
44544
44545 static struct nes_cm_core *g_cm_core;
44546
44547-atomic_t cm_connects;
44548-atomic_t cm_accepts;
44549-atomic_t cm_disconnects;
44550-atomic_t cm_closes;
44551-atomic_t cm_connecteds;
44552-atomic_t cm_connect_reqs;
44553-atomic_t cm_rejects;
44554+atomic_unchecked_t cm_connects;
44555+atomic_unchecked_t cm_accepts;
44556+atomic_unchecked_t cm_disconnects;
44557+atomic_unchecked_t cm_closes;
44558+atomic_unchecked_t cm_connecteds;
44559+atomic_unchecked_t cm_connect_reqs;
44560+atomic_unchecked_t cm_rejects;
44561
44562 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
44563 {
44564@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
44565 kfree(listener);
44566 listener = NULL;
44567 ret = 0;
44568- atomic_inc(&cm_listens_destroyed);
44569+ atomic_inc_unchecked(&cm_listens_destroyed);
44570 } else {
44571 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
44572 }
44573@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
44574 cm_node->rem_mac);
44575
44576 add_hte_node(cm_core, cm_node);
44577- atomic_inc(&cm_nodes_created);
44578+ atomic_inc_unchecked(&cm_nodes_created);
44579
44580 return cm_node;
44581 }
44582@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
44583 }
44584
44585 atomic_dec(&cm_core->node_cnt);
44586- atomic_inc(&cm_nodes_destroyed);
44587+ atomic_inc_unchecked(&cm_nodes_destroyed);
44588 nesqp = cm_node->nesqp;
44589 if (nesqp) {
44590 nesqp->cm_node = NULL;
44591@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
44592
44593 static void drop_packet(struct sk_buff *skb)
44594 {
44595- atomic_inc(&cm_accel_dropped_pkts);
44596+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44597 dev_kfree_skb_any(skb);
44598 }
44599
44600@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
44601 {
44602
44603 int reset = 0; /* whether to send reset in case of err.. */
44604- atomic_inc(&cm_resets_recvd);
44605+ atomic_inc_unchecked(&cm_resets_recvd);
44606 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
44607 " refcnt=%d\n", cm_node, cm_node->state,
44608 atomic_read(&cm_node->ref_count));
44609@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
44610 rem_ref_cm_node(cm_node->cm_core, cm_node);
44611 return NULL;
44612 }
44613- atomic_inc(&cm_loopbacks);
44614+ atomic_inc_unchecked(&cm_loopbacks);
44615 loopbackremotenode->loopbackpartner = cm_node;
44616 loopbackremotenode->tcp_cntxt.rcv_wscale =
44617 NES_CM_DEFAULT_RCV_WND_SCALE;
44618@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
44619 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
44620 else {
44621 rem_ref_cm_node(cm_core, cm_node);
44622- atomic_inc(&cm_accel_dropped_pkts);
44623+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44624 dev_kfree_skb_any(skb);
44625 }
44626 break;
44627@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44628
44629 if ((cm_id) && (cm_id->event_handler)) {
44630 if (issue_disconn) {
44631- atomic_inc(&cm_disconnects);
44632+ atomic_inc_unchecked(&cm_disconnects);
44633 cm_event.event = IW_CM_EVENT_DISCONNECT;
44634 cm_event.status = disconn_status;
44635 cm_event.local_addr = cm_id->local_addr;
44636@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44637 }
44638
44639 if (issue_close) {
44640- atomic_inc(&cm_closes);
44641+ atomic_inc_unchecked(&cm_closes);
44642 nes_disconnect(nesqp, 1);
44643
44644 cm_id->provider_data = nesqp;
44645@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44646
44647 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
44648 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
44649- atomic_inc(&cm_accepts);
44650+ atomic_inc_unchecked(&cm_accepts);
44651
44652 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
44653 netdev_refcnt_read(nesvnic->netdev));
44654@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
44655 struct nes_cm_core *cm_core;
44656 u8 *start_buff;
44657
44658- atomic_inc(&cm_rejects);
44659+ atomic_inc_unchecked(&cm_rejects);
44660 cm_node = (struct nes_cm_node *)cm_id->provider_data;
44661 loopback = cm_node->loopbackpartner;
44662 cm_core = cm_node->cm_core;
44663@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44664 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
44665 ntohs(laddr->sin_port));
44666
44667- atomic_inc(&cm_connects);
44668+ atomic_inc_unchecked(&cm_connects);
44669 nesqp->active_conn = 1;
44670
44671 /* cache the cm_id in the qp */
44672@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
44673 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
44674 return err;
44675 }
44676- atomic_inc(&cm_listens_created);
44677+ atomic_inc_unchecked(&cm_listens_created);
44678 }
44679
44680 cm_id->add_ref(cm_id);
44681@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
44682
44683 if (nesqp->destroyed)
44684 return;
44685- atomic_inc(&cm_connecteds);
44686+ atomic_inc_unchecked(&cm_connecteds);
44687 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
44688 " local port 0x%04X. jiffies = %lu.\n",
44689 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
44690@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
44691
44692 cm_id->add_ref(cm_id);
44693 ret = cm_id->event_handler(cm_id, &cm_event);
44694- atomic_inc(&cm_closes);
44695+ atomic_inc_unchecked(&cm_closes);
44696 cm_event.event = IW_CM_EVENT_CLOSE;
44697 cm_event.status = 0;
44698 cm_event.provider_data = cm_id->provider_data;
44699@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
44700 return;
44701 cm_id = cm_node->cm_id;
44702
44703- atomic_inc(&cm_connect_reqs);
44704+ atomic_inc_unchecked(&cm_connect_reqs);
44705 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44706 cm_node, cm_id, jiffies);
44707
44708@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
44709 return;
44710 cm_id = cm_node->cm_id;
44711
44712- atomic_inc(&cm_connect_reqs);
44713+ atomic_inc_unchecked(&cm_connect_reqs);
44714 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44715 cm_node, cm_id, jiffies);
44716
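
The hunks above swap the nes connection-manager statistics from atomic_t to atomic_unchecked_t: under the patch's REFCOUNT hardening, plain atomic_t increments trap on overflow to stop reference-count exploits, so counters that are pure statistics and may legitimately wrap are moved to the unchecked variant. Below is a minimal userspace sketch of that distinction using C11 stdatomic; checked_inc() and stat_inc() are illustrative names, not part of the patch, and the abort() merely models the kernel-side overflow report.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Userspace model of a "checked" reference count: an increment that
 * would wrap past INT_MAX is treated as a fatal bug. */
static void checked_inc(atomic_int *v)
{
    int old = atomic_load(v);
    for (;;) {
        if (old == INT_MAX) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();                    /* models the kernel-side trap */
        }
        if (atomic_compare_exchange_weak(v, &old, old + 1))
            return;                     /* 'old' is reloaded on failure */
    }
}

/* Statistics counter: wrapping is harmless, so no check is wanted.
 * This is the role atomic_unchecked_t plays in the hunks above. */
static void stat_inc(atomic_uint *v)
{
    atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
}

int main(void)
{
    atomic_int refs = ATOMIC_VAR_INIT(1);
    atomic_uint cm_connects = ATOMIC_VAR_INIT(0);

    checked_inc(&refs);     /* object lifetime: must never wrap */
    stat_inc(&cm_connects); /* driver statistic: may wrap by design */

    printf("refs=%d connects=%u\n", atomic_load(&refs),
           atomic_load(&cm_connects));
    return 0;
}
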
44717diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
44718index 4166452..fc952c3 100644
44719--- a/drivers/infiniband/hw/nes/nes_mgt.c
44720+++ b/drivers/infiniband/hw/nes/nes_mgt.c
44721@@ -40,8 +40,8 @@
44722 #include "nes.h"
44723 #include "nes_mgt.h"
44724
44725-atomic_t pau_qps_created;
44726-atomic_t pau_qps_destroyed;
44727+atomic_unchecked_t pau_qps_created;
44728+atomic_unchecked_t pau_qps_destroyed;
44729
44730 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
44731 {
44732@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
44733 {
44734 struct sk_buff *skb;
44735 unsigned long flags;
44736- atomic_inc(&pau_qps_destroyed);
44737+ atomic_inc_unchecked(&pau_qps_destroyed);
44738
44739 /* Free packets that have not yet been forwarded */
44740 /* Lock is acquired by skb_dequeue when removing the skb */
44741@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
44742 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
44743 skb_queue_head_init(&nesqp->pau_list);
44744 spin_lock_init(&nesqp->pau_lock);
44745- atomic_inc(&pau_qps_created);
44746+ atomic_inc_unchecked(&pau_qps_created);
44747 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
44748 }
44749
44750diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
44751index 49eb511..a774366 100644
44752--- a/drivers/infiniband/hw/nes/nes_nic.c
44753+++ b/drivers/infiniband/hw/nes/nes_nic.c
44754@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
44755 target_stat_values[++index] = mh_detected;
44756 target_stat_values[++index] = mh_pauses_sent;
44757 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
44758- target_stat_values[++index] = atomic_read(&cm_connects);
44759- target_stat_values[++index] = atomic_read(&cm_accepts);
44760- target_stat_values[++index] = atomic_read(&cm_disconnects);
44761- target_stat_values[++index] = atomic_read(&cm_connecteds);
44762- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
44763- target_stat_values[++index] = atomic_read(&cm_rejects);
44764- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
44765- target_stat_values[++index] = atomic_read(&qps_created);
44766- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
44767- target_stat_values[++index] = atomic_read(&qps_destroyed);
44768- target_stat_values[++index] = atomic_read(&cm_closes);
44769+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
44770+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
44771+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
44772+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
44773+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
44774+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
44775+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
44776+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
44777+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
44778+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
44779+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
44780 target_stat_values[++index] = cm_packets_sent;
44781 target_stat_values[++index] = cm_packets_bounced;
44782 target_stat_values[++index] = cm_packets_created;
44783 target_stat_values[++index] = cm_packets_received;
44784 target_stat_values[++index] = cm_packets_dropped;
44785 target_stat_values[++index] = cm_packets_retrans;
44786- target_stat_values[++index] = atomic_read(&cm_listens_created);
44787- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
44788+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
44789+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
44790 target_stat_values[++index] = cm_backlog_drops;
44791- target_stat_values[++index] = atomic_read(&cm_loopbacks);
44792- target_stat_values[++index] = atomic_read(&cm_nodes_created);
44793- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
44794- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
44795- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
44796+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
44797+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
44798+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
44799+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
44800+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
44801 target_stat_values[++index] = nesadapter->free_4kpbl;
44802 target_stat_values[++index] = nesadapter->free_256pbl;
44803 target_stat_values[++index] = int_mod_timer_init;
44804 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
44805 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
44806 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
44807- target_stat_values[++index] = atomic_read(&pau_qps_created);
44808- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
44809+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
44810+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
44811 }
44812
44813 /**
44814diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
44815index 218dd35..97ce31d 100644
44816--- a/drivers/infiniband/hw/nes/nes_verbs.c
44817+++ b/drivers/infiniband/hw/nes/nes_verbs.c
44818@@ -46,9 +46,9 @@
44819
44820 #include <rdma/ib_umem.h>
44821
44822-atomic_t mod_qp_timouts;
44823-atomic_t qps_created;
44824-atomic_t sw_qps_destroyed;
44825+atomic_unchecked_t mod_qp_timouts;
44826+atomic_unchecked_t qps_created;
44827+atomic_unchecked_t sw_qps_destroyed;
44828
44829 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
44830
44831@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
44832 if (init_attr->create_flags)
44833 return ERR_PTR(-EINVAL);
44834
44835- atomic_inc(&qps_created);
44836+ atomic_inc_unchecked(&qps_created);
44837 switch (init_attr->qp_type) {
44838 case IB_QPT_RC:
44839 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
44840@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
44841 struct iw_cm_event cm_event;
44842 int ret = 0;
44843
44844- atomic_inc(&sw_qps_destroyed);
44845+ atomic_inc_unchecked(&sw_qps_destroyed);
44846 nesqp->destroyed = 1;
44847
44848 /* Blow away the connection if it exists. */
44849diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
44850index c00ae09..04e91be 100644
44851--- a/drivers/infiniband/hw/qib/qib.h
44852+++ b/drivers/infiniband/hw/qib/qib.h
44853@@ -52,6 +52,7 @@
44854 #include <linux/kref.h>
44855 #include <linux/sched.h>
44856 #include <linux/kthread.h>
44857+#include <linux/slab.h>
44858
44859 #include "qib_common.h"
44860 #include "qib_verbs.h"
44861diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
44862index 24c41ba..102d71f 100644
44863--- a/drivers/input/gameport/gameport.c
44864+++ b/drivers/input/gameport/gameport.c
44865@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44866 */
44867 static void gameport_init_port(struct gameport *gameport)
44868 {
44869- static atomic_t gameport_no = ATOMIC_INIT(0);
44870+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
44871
44872 __module_get(THIS_MODULE);
44873
44874 mutex_init(&gameport->drv_mutex);
44875 device_initialize(&gameport->dev);
44876 dev_set_name(&gameport->dev, "gameport%lu",
44877- (unsigned long)atomic_inc_return(&gameport_no) - 1);
44878+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
44879 gameport->dev.bus = &gameport_bus;
44880 gameport->dev.release = gameport_release_port;
44881 if (gameport->parent)
44882diff --git a/drivers/input/input.c b/drivers/input/input.c
44883index 29ca0bb..f4bc2e3 100644
44884--- a/drivers/input/input.c
44885+++ b/drivers/input/input.c
44886@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class);
44887 */
44888 struct input_dev *input_allocate_device(void)
44889 {
44890- static atomic_t input_no = ATOMIC_INIT(0);
44891+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
44892 struct input_dev *dev;
44893
44894 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44895@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void)
44896 INIT_LIST_HEAD(&dev->node);
44897
44898 dev_set_name(&dev->dev, "input%ld",
44899- (unsigned long) atomic_inc_return(&input_no) - 1);
44900+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
44901
44902 __module_get(THIS_MODULE);
44903 }
44904diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44905index 4a95b22..874c182 100644
44906--- a/drivers/input/joystick/sidewinder.c
44907+++ b/drivers/input/joystick/sidewinder.c
44908@@ -30,6 +30,7 @@
44909 #include <linux/kernel.h>
44910 #include <linux/module.h>
44911 #include <linux/slab.h>
44912+#include <linux/sched.h>
44913 #include <linux/input.h>
44914 #include <linux/gameport.h>
44915 #include <linux/jiffies.h>
44916diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44917index 603fe0d..f63decc 100644
44918--- a/drivers/input/joystick/xpad.c
44919+++ b/drivers/input/joystick/xpad.c
44920@@ -737,7 +737,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44921
44922 static int xpad_led_probe(struct usb_xpad *xpad)
44923 {
44924- static atomic_t led_seq = ATOMIC_INIT(0);
44925+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
44926 long led_no;
44927 struct xpad_led *led;
44928 struct led_classdev *led_cdev;
44929@@ -750,7 +750,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44930 if (!led)
44931 return -ENOMEM;
44932
44933- led_no = (long)atomic_inc_return(&led_seq) - 1;
44934+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
44935
44936 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
44937 led->xpad = xpad;
44938diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44939index 719410f..1896169 100644
44940--- a/drivers/input/misc/ims-pcu.c
44941+++ b/drivers/input/misc/ims-pcu.c
44942@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44943
44944 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44945 {
44946- static atomic_t device_no = ATOMIC_INIT(0);
44947+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
44948
44949 const struct ims_pcu_device_info *info;
44950 int error;
44951@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44952 }
44953
44954 /* Device appears to be operable, complete initialization */
44955- pcu->device_no = atomic_inc_return(&device_no) - 1;
44956+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
44957
44958 /*
44959 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44960diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44961index 2f0b39d..7370f13 100644
44962--- a/drivers/input/mouse/psmouse.h
44963+++ b/drivers/input/mouse/psmouse.h
44964@@ -116,7 +116,7 @@ struct psmouse_attribute {
44965 ssize_t (*set)(struct psmouse *psmouse, void *data,
44966 const char *buf, size_t count);
44967 bool protect;
44968-};
44969+} __do_const;
44970 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44971
44972 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44973diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44974index b604564..3f14ae4 100644
44975--- a/drivers/input/mousedev.c
44976+++ b/drivers/input/mousedev.c
44977@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44978
44979 spin_unlock_irq(&client->packet_lock);
44980
44981- if (copy_to_user(buffer, data, count))
44982+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44983 return -EFAULT;
44984
44985 return count;
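
The mousedev_read() hunk adds a bound on the caller-supplied count before copy_to_user(), so a request larger than the on-stack packet buffer fails instead of copying adjacent stack memory out to userspace. A small userspace sketch of the same guard follows, with memcpy() standing in for copy_to_user() and all names illustrative.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

/* Model of the fix above: refuse to copy more bytes than the local
 * packet buffer actually holds, instead of trusting the caller. */
static ssize_t packet_read(char *dst, size_t count)
{
    char data[64];                      /* stands in for the packet buffer */

    memset(data, 0xAB, sizeof(data));
    if (count > sizeof(data))           /* the added guard */
        return -EFAULT;
    memcpy(dst, data, count);           /* stands in for copy_to_user() */
    return (ssize_t)count;
}

int main(void)
{
    char buf[256];

    printf("read 32  -> %zd\n", packet_read(buf, 32));
    printf("read 200 -> %zd\n", packet_read(buf, 200)); /* rejected */
    return 0;
}
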
44986diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44987index b29134d..394deb0 100644
44988--- a/drivers/input/serio/serio.c
44989+++ b/drivers/input/serio/serio.c
44990@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44991 */
44992 static void serio_init_port(struct serio *serio)
44993 {
44994- static atomic_t serio_no = ATOMIC_INIT(0);
44995+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
44996
44997 __module_get(THIS_MODULE);
44998
44999@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
45000 mutex_init(&serio->drv_mutex);
45001 device_initialize(&serio->dev);
45002 dev_set_name(&serio->dev, "serio%ld",
45003- (long)atomic_inc_return(&serio_no) - 1);
45004+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
45005 serio->dev.bus = &serio_bus;
45006 serio->dev.release = serio_release_port;
45007 serio->dev.groups = serio_device_attr_groups;
45008diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
45009index c9a02fe..0debc75 100644
45010--- a/drivers/input/serio/serio_raw.c
45011+++ b/drivers/input/serio/serio_raw.c
45012@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
45013
45014 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
45015 {
45016- static atomic_t serio_raw_no = ATOMIC_INIT(0);
45017+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
45018 struct serio_raw *serio_raw;
45019 int err;
45020
45021@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
45022 }
45023
45024 snprintf(serio_raw->name, sizeof(serio_raw->name),
45025- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
45026+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
45027 kref_init(&serio_raw->kref);
45028 INIT_LIST_HEAD(&serio_raw->client_list);
45029 init_waitqueue_head(&serio_raw->wait);
45030diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
45031index e5555fc..937986d 100644
45032--- a/drivers/iommu/iommu.c
45033+++ b/drivers/iommu/iommu.c
45034@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
45035 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
45036 {
45037 bus_register_notifier(bus, &iommu_bus_nb);
45038- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
45039+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
45040 }
45041
45042 /**
45043diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
45044index 33c4395..e06447e 100644
45045--- a/drivers/iommu/irq_remapping.c
45046+++ b/drivers/iommu/irq_remapping.c
45047@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
45048 void panic_if_irq_remap(const char *msg)
45049 {
45050 if (irq_remapping_enabled)
45051- panic(msg);
45052+ panic("%s", msg);
45053 }
45054
45055 static void ir_ack_apic_edge(struct irq_data *data)
45056@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
45057
45058 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
45059 {
45060- chip->irq_print_chip = ir_print_prefix;
45061- chip->irq_ack = ir_ack_apic_edge;
45062- chip->irq_eoi = ir_ack_apic_level;
45063- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
45064+ pax_open_kernel();
45065+ *(void **)&chip->irq_print_chip = ir_print_prefix;
45066+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
45067+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
45068+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
45069+ pax_close_kernel();
45070 }
45071
45072 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
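
The irq_remap_modify_chip_defaults() hunk writes the callbacks through void-pointer casts and brackets the stores with pax_open_kernel()/pax_close_kernel(), because under the patch's KERNEXEC/constify hardening the irq_chip structure sits in read-only memory and must be unprotected only for the brief update. A rough userspace analogy using mprotect() on a page-aligned ops table is sketched below; the struct and handler names are invented for illustration.

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

struct ops { void (*handler)(void); };

static void old_handler(void) { puts("old handler"); }
static void new_handler(void) { puts("new handler"); }

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    /* Give the ops table its own page so it can be write-protected,
     * the way constified kernel structures live in read-only memory. */
    struct ops *ops = mmap(NULL, page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED) { perror("mmap"); return 1; }

    ops->handler = old_handler;
    mprotect(ops, page, PROT_READ);     /* table is now read-only */
    ops->handler();

    /* Analogue of pax_open_kernel(): open a short write window ... */
    mprotect(ops, page, PROT_READ | PROT_WRITE);
    ops->handler = new_handler;
    /* ... and of pax_close_kernel(): re-arm the protection. */
    mprotect(ops, page, PROT_READ);

    ops->handler();
    munmap(ops, page);
    return 0;
}
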
45073diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
45074index 7c131cf..035129b 100644
45075--- a/drivers/irqchip/irq-gic.c
45076+++ b/drivers/irqchip/irq-gic.c
45077@@ -85,7 +85,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
45078 * Supported arch specific GIC irq extension.
45079 * Default make them NULL.
45080 */
45081-struct irq_chip gic_arch_extn = {
45082+irq_chip_no_const gic_arch_extn = {
45083 .irq_eoi = NULL,
45084 .irq_mask = NULL,
45085 .irq_unmask = NULL,
45086@@ -337,7 +337,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
45087 chained_irq_exit(chip, desc);
45088 }
45089
45090-static struct irq_chip gic_chip = {
45091+static irq_chip_no_const gic_chip __read_only = {
45092 .name = "GIC",
45093 .irq_mask = gic_mask_irq,
45094 .irq_unmask = gic_unmask_irq,
45095diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
45096index 8777065..a4a9967 100644
45097--- a/drivers/irqchip/irq-renesas-irqc.c
45098+++ b/drivers/irqchip/irq-renesas-irqc.c
45099@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
45100 struct irqc_priv *p;
45101 struct resource *io;
45102 struct resource *irq;
45103- struct irq_chip *irq_chip;
45104+ irq_chip_no_const *irq_chip;
45105 const char *name = dev_name(&pdev->dev);
45106 int ret;
45107 int k;
45108diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
45109index f9a87ed..3fdd854 100644
45110--- a/drivers/isdn/capi/capi.c
45111+++ b/drivers/isdn/capi/capi.c
45112@@ -81,8 +81,8 @@ struct capiminor {
45113
45114 struct capi20_appl *ap;
45115 u32 ncci;
45116- atomic_t datahandle;
45117- atomic_t msgid;
45118+ atomic_unchecked_t datahandle;
45119+ atomic_unchecked_t msgid;
45120
45121 struct tty_port port;
45122 int ttyinstop;
45123@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
45124 capimsg_setu16(s, 2, mp->ap->applid);
45125 capimsg_setu8 (s, 4, CAPI_DATA_B3);
45126 capimsg_setu8 (s, 5, CAPI_RESP);
45127- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
45128+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
45129 capimsg_setu32(s, 8, mp->ncci);
45130 capimsg_setu16(s, 12, datahandle);
45131 }
45132@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
45133 mp->outbytes -= len;
45134 spin_unlock_bh(&mp->outlock);
45135
45136- datahandle = atomic_inc_return(&mp->datahandle);
45137+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
45138 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
45139 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
45140 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
45141 capimsg_setu16(skb->data, 2, mp->ap->applid);
45142 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
45143 capimsg_setu8 (skb->data, 5, CAPI_REQ);
45144- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
45145+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
45146 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
45147 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
45148 capimsg_setu16(skb->data, 16, len); /* Data length */
45149diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
45150index b7ae0a0..04590fa 100644
45151--- a/drivers/isdn/gigaset/bas-gigaset.c
45152+++ b/drivers/isdn/gigaset/bas-gigaset.c
45153@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
45154
45155
45156 static const struct gigaset_ops gigops = {
45157- gigaset_write_cmd,
45158- gigaset_write_room,
45159- gigaset_chars_in_buffer,
45160- gigaset_brkchars,
45161- gigaset_init_bchannel,
45162- gigaset_close_bchannel,
45163- gigaset_initbcshw,
45164- gigaset_freebcshw,
45165- gigaset_reinitbcshw,
45166- gigaset_initcshw,
45167- gigaset_freecshw,
45168- gigaset_set_modem_ctrl,
45169- gigaset_baud_rate,
45170- gigaset_set_line_ctrl,
45171- gigaset_isoc_send_skb,
45172- gigaset_isoc_input,
45173+ .write_cmd = gigaset_write_cmd,
45174+ .write_room = gigaset_write_room,
45175+ .chars_in_buffer = gigaset_chars_in_buffer,
45176+ .brkchars = gigaset_brkchars,
45177+ .init_bchannel = gigaset_init_bchannel,
45178+ .close_bchannel = gigaset_close_bchannel,
45179+ .initbcshw = gigaset_initbcshw,
45180+ .freebcshw = gigaset_freebcshw,
45181+ .reinitbcshw = gigaset_reinitbcshw,
45182+ .initcshw = gigaset_initcshw,
45183+ .freecshw = gigaset_freecshw,
45184+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45185+ .baud_rate = gigaset_baud_rate,
45186+ .set_line_ctrl = gigaset_set_line_ctrl,
45187+ .send_skb = gigaset_isoc_send_skb,
45188+ .handle_input = gigaset_isoc_input,
45189 };
45190
45191 /* bas_gigaset_init
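
The gigops table above is rewritten from positional to C99 designated initializers, so each callback is bound to its struct gigaset_ops field by name rather than by position, and the table stays correct if fields are reordered; the same conversion recurs for ser-gigaset and usb-gigaset below. A compilable sketch of the two forms, using an invented two-field struct for brevity:

#include <stdio.h>

struct ops {
    int  (*write_room)(void);
    void (*brkchars)(const unsigned char *buf);
};

static int  my_write_room(void) { return 42; }
static void my_brkchars(const unsigned char *buf) { (void)buf; }

/* Positional form: silently breaks if a field is added or reordered. */
static const struct ops positional = { my_write_room, my_brkchars };

/* Designated form: each pointer is bound to a field by name, and any
 * field left out is zero-initialized. */
static const struct ops designated = {
    .write_room = my_write_room,
    .brkchars   = my_brkchars,
};

int main(void)
{
    printf("%d %d\n", positional.write_room(), designated.write_room());
    return 0;
}
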
45192diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
45193index 600c79b..3752bab 100644
45194--- a/drivers/isdn/gigaset/interface.c
45195+++ b/drivers/isdn/gigaset/interface.c
45196@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
45197 }
45198 tty->driver_data = cs;
45199
45200- ++cs->port.count;
45201+ atomic_inc(&cs->port.count);
45202
45203- if (cs->port.count == 1) {
45204+ if (atomic_read(&cs->port.count) == 1) {
45205 tty_port_tty_set(&cs->port, tty);
45206 cs->port.low_latency = 1;
45207 }
45208@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
45209
45210 if (!cs->connected)
45211 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
45212- else if (!cs->port.count)
45213+ else if (!atomic_read(&cs->port.count))
45214 dev_warn(cs->dev, "%s: device not opened\n", __func__);
45215- else if (!--cs->port.count)
45216+ else if (!atomic_dec_return(&cs->port.count))
45217 tty_port_tty_set(&cs->port, NULL);
45218
45219 mutex_unlock(&cs->mutex);
45220diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
45221index 8c91fd5..14f13ce 100644
45222--- a/drivers/isdn/gigaset/ser-gigaset.c
45223+++ b/drivers/isdn/gigaset/ser-gigaset.c
45224@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
45225 }
45226
45227 static const struct gigaset_ops ops = {
45228- gigaset_write_cmd,
45229- gigaset_write_room,
45230- gigaset_chars_in_buffer,
45231- gigaset_brkchars,
45232- gigaset_init_bchannel,
45233- gigaset_close_bchannel,
45234- gigaset_initbcshw,
45235- gigaset_freebcshw,
45236- gigaset_reinitbcshw,
45237- gigaset_initcshw,
45238- gigaset_freecshw,
45239- gigaset_set_modem_ctrl,
45240- gigaset_baud_rate,
45241- gigaset_set_line_ctrl,
45242- gigaset_m10x_send_skb, /* asyncdata.c */
45243- gigaset_m10x_input, /* asyncdata.c */
45244+ .write_cmd = gigaset_write_cmd,
45245+ .write_room = gigaset_write_room,
45246+ .chars_in_buffer = gigaset_chars_in_buffer,
45247+ .brkchars = gigaset_brkchars,
45248+ .init_bchannel = gigaset_init_bchannel,
45249+ .close_bchannel = gigaset_close_bchannel,
45250+ .initbcshw = gigaset_initbcshw,
45251+ .freebcshw = gigaset_freebcshw,
45252+ .reinitbcshw = gigaset_reinitbcshw,
45253+ .initcshw = gigaset_initcshw,
45254+ .freecshw = gigaset_freecshw,
45255+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45256+ .baud_rate = gigaset_baud_rate,
45257+ .set_line_ctrl = gigaset_set_line_ctrl,
45258+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
45259+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
45260 };
45261
45262
45263diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45264index d0a41cb..b953e50 100644
45265--- a/drivers/isdn/gigaset/usb-gigaset.c
45266+++ b/drivers/isdn/gigaset/usb-gigaset.c
45267@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45268 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45269 memcpy(cs->hw.usb->bchars, buf, 6);
45270 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45271- 0, 0, &buf, 6, 2000);
45272+ 0, 0, buf, 6, 2000);
45273 }
45274
45275 static void gigaset_freebcshw(struct bc_state *bcs)
45276@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45277 }
45278
45279 static const struct gigaset_ops ops = {
45280- gigaset_write_cmd,
45281- gigaset_write_room,
45282- gigaset_chars_in_buffer,
45283- gigaset_brkchars,
45284- gigaset_init_bchannel,
45285- gigaset_close_bchannel,
45286- gigaset_initbcshw,
45287- gigaset_freebcshw,
45288- gigaset_reinitbcshw,
45289- gigaset_initcshw,
45290- gigaset_freecshw,
45291- gigaset_set_modem_ctrl,
45292- gigaset_baud_rate,
45293- gigaset_set_line_ctrl,
45294- gigaset_m10x_send_skb,
45295- gigaset_m10x_input,
45296+ .write_cmd = gigaset_write_cmd,
45297+ .write_room = gigaset_write_room,
45298+ .chars_in_buffer = gigaset_chars_in_buffer,
45299+ .brkchars = gigaset_brkchars,
45300+ .init_bchannel = gigaset_init_bchannel,
45301+ .close_bchannel = gigaset_close_bchannel,
45302+ .initbcshw = gigaset_initbcshw,
45303+ .freebcshw = gigaset_freebcshw,
45304+ .reinitbcshw = gigaset_reinitbcshw,
45305+ .initcshw = gigaset_initcshw,
45306+ .freecshw = gigaset_freecshw,
45307+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45308+ .baud_rate = gigaset_baud_rate,
45309+ .set_line_ctrl = gigaset_set_line_ctrl,
45310+ .send_skb = gigaset_m10x_send_skb,
45311+ .handle_input = gigaset_m10x_input,
45312 };
45313
45314 /*
45315diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45316index 4d9b195..455075c 100644
45317--- a/drivers/isdn/hardware/avm/b1.c
45318+++ b/drivers/isdn/hardware/avm/b1.c
45319@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45320 }
45321 if (left) {
45322 if (t4file->user) {
45323- if (copy_from_user(buf, dp, left))
45324+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45325 return -EFAULT;
45326 } else {
45327 memcpy(buf, dp, left);
45328@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45329 }
45330 if (left) {
45331 if (config->user) {
45332- if (copy_from_user(buf, dp, left))
45333+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45334 return -EFAULT;
45335 } else {
45336 memcpy(buf, dp, left);
45337diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45338index 9b856e1..fa03c92 100644
45339--- a/drivers/isdn/i4l/isdn_common.c
45340+++ b/drivers/isdn/i4l/isdn_common.c
45341@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45342 } else
45343 return -EINVAL;
45344 case IIOCDBGVAR:
45345+ if (!capable(CAP_SYS_RAWIO))
45346+ return -EPERM;
45347 if (arg) {
45348 if (copy_to_user(argp, &dev, sizeof(ulong)))
45349 return -EFAULT;
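
The IIOCDBGVAR hunk gates the ioctl behind capable(CAP_SYS_RAWIO), since it copies the address of the global dev object to userspace and a kernel pointer in unprivileged hands undermines address-space randomization. A loose userspace analogy of the pattern follows, with geteuid() == 0 standing in for the capability check and all names illustrative.

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

static int dev;  /* stands in for the global isdn 'dev' object */

/* Model of the hardening above: an internal object address is debug
 * information, so hand it out only to a privileged caller. */
static long dbgvar_ioctl(unsigned long *argp)
{
    if (geteuid() != 0)                 /* capable(CAP_SYS_RAWIO) stand-in */
        return -EPERM;
    *argp = (unsigned long)&dev;        /* the pointer being guarded */
    return 0;
}

int main(void)
{
    unsigned long val = 0;
    long ret = dbgvar_ioctl(&val);

    if (ret == 0)
        printf("dev at %#lx\n", val);
    else
        printf("denied (%ld)\n", ret);
    return 0;
}
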
45350diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45351index 91d5730..336523e 100644
45352--- a/drivers/isdn/i4l/isdn_concap.c
45353+++ b/drivers/isdn/i4l/isdn_concap.c
45354@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45355 }
45356
45357 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45358- &isdn_concap_dl_data_req,
45359- &isdn_concap_dl_connect_req,
45360- &isdn_concap_dl_disconn_req
45361+ .data_req = &isdn_concap_dl_data_req,
45362+ .connect_req = &isdn_concap_dl_connect_req,
45363+ .disconn_req = &isdn_concap_dl_disconn_req
45364 };
45365
45366 /* The following should better go into a dedicated source file such that
45367diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
45368index 62f0688..38ceac5 100644
45369--- a/drivers/isdn/i4l/isdn_ppp.c
45370+++ b/drivers/isdn/i4l/isdn_ppp.c
45371@@ -378,15 +378,10 @@ isdn_ppp_release(int min, struct file *file)
45372 is->slcomp = NULL;
45373 #endif
45374 #ifdef CONFIG_IPPP_FILTER
45375- if (is->pass_filter) {
45376- sk_unattached_filter_destroy(is->pass_filter);
45377- is->pass_filter = NULL;
45378- }
45379-
45380- if (is->active_filter) {
45381- sk_unattached_filter_destroy(is->active_filter);
45382- is->active_filter = NULL;
45383- }
45384+ kfree(is->pass_filter);
45385+ is->pass_filter = NULL;
45386+ kfree(is->active_filter);
45387+ is->active_filter = NULL;
45388 #endif
45389
45390 /* TODO: if this was the previous master: link the stuff to the new master */
45391@@ -442,7 +437,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
45392 {
45393 struct sock_fprog uprog;
45394 struct sock_filter *code = NULL;
45395- int len;
45396+ int len, err;
45397
45398 if (copy_from_user(&uprog, arg, sizeof(uprog)))
45399 return -EFAULT;
45400@@ -458,6 +453,12 @@ static int get_filter(void __user *arg, struct sock_filter **p)
45401 if (IS_ERR(code))
45402 return PTR_ERR(code);
45403
45404+ err = sk_chk_filter(code, uprog.len);
45405+ if (err) {
45406+ kfree(code);
45407+ return err;
45408+ }
45409+
45410 *p = code;
45411 return uprog.len;
45412 }
45413@@ -628,53 +629,25 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
45414 #ifdef CONFIG_IPPP_FILTER
45415 case PPPIOCSPASS:
45416 {
45417- struct sock_fprog_kern fprog;
45418 struct sock_filter *code;
45419- int err, len = get_filter(argp, &code);
45420-
45421+ int len = get_filter(argp, &code);
45422 if (len < 0)
45423 return len;
45424-
45425- fprog.len = len;
45426- fprog.filter = code;
45427-
45428- if (is->pass_filter) {
45429- sk_unattached_filter_destroy(is->pass_filter);
45430- is->pass_filter = NULL;
45431- }
45432- if (fprog.filter != NULL)
45433- err = sk_unattached_filter_create(&is->pass_filter,
45434- &fprog);
45435- else
45436- err = 0;
45437- kfree(code);
45438-
45439- return err;
45440+ kfree(is->pass_filter);
45441+ is->pass_filter = code;
45442+ is->pass_len = len;
45443+ break;
45444 }
45445 case PPPIOCSACTIVE:
45446 {
45447- struct sock_fprog_kern fprog;
45448 struct sock_filter *code;
45449- int err, len = get_filter(argp, &code);
45450-
45451+ int len = get_filter(argp, &code);
45452 if (len < 0)
45453 return len;
45454-
45455- fprog.len = len;
45456- fprog.filter = code;
45457-
45458- if (is->active_filter) {
45459- sk_unattached_filter_destroy(is->active_filter);
45460- is->active_filter = NULL;
45461- }
45462- if (fprog.filter != NULL)
45463- err = sk_unattached_filter_create(&is->active_filter,
45464- &fprog);
45465- else
45466- err = 0;
45467- kfree(code);
45468-
45469- return err;
45470+ kfree(is->active_filter);
45471+ is->active_filter = code;
45472+ is->active_len = len;
45473+ break;
45474 }
45475 #endif /* CONFIG_IPPP_FILTER */
45476 default:
45477@@ -1174,14 +1147,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
45478 }
45479
45480 if (is->pass_filter
45481- && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
45482+ && sk_run_filter(skb, is->pass_filter) == 0) {
45483 if (is->debug & 0x2)
45484 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
45485 kfree_skb(skb);
45486 return;
45487 }
45488 if (!(is->active_filter
45489- && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
45490+ && sk_run_filter(skb, is->active_filter) == 0)) {
45491 if (is->debug & 0x2)
45492 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
45493 lp->huptimer = 0;
45494@@ -1320,14 +1293,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
45495 }
45496
45497 if (ipt->pass_filter
45498- && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
45499+ && sk_run_filter(skb, ipt->pass_filter) == 0) {
45500 if (ipt->debug & 0x4)
45501 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
45502 kfree_skb(skb);
45503 goto unlock;
45504 }
45505 if (!(ipt->active_filter
45506- && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
45507+ && sk_run_filter(skb, ipt->active_filter) == 0)) {
45508 if (ipt->debug & 0x4)
45509 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
45510 lp->huptimer = 0;
45511@@ -1517,9 +1490,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
45512 }
45513
45514 drop |= is->pass_filter
45515- && SK_RUN_FILTER(is->pass_filter, skb) == 0;
45516+ && sk_run_filter(skb, is->pass_filter) == 0;
45517 drop |= is->active_filter
45518- && SK_RUN_FILTER(is->active_filter, skb) == 0;
45519+ && sk_run_filter(skb, is->active_filter) == 0;
45520
45521 skb_push(skb, IPPP_MAX_HEADER - 4);
45522 return drop;
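
The isdn_ppp.c hunks store the pass/active filters as plain sock_filter arrays and, crucially, run sk_chk_filter() on the user-supplied program inside get_filter() before anything is installed, so a malformed filter is rejected rather than kept. A toy model of that validate-then-install flow is sketched below; the opcode set and helper names are invented, whereas the real code validates BPF instructions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

enum { OP_ACCEPT, OP_DROP, OP_SKIP, OP_MAX };

struct insn { unsigned char op; };

/* Stand-in for sk_chk_filter(): every instruction must be valid. */
static int chk_filter(const struct insn *prog, int len)
{
    for (int i = 0; i < len; i++)
        if (prog[i].op >= OP_MAX)
            return -EINVAL;             /* reject before installing */
    return 0;
}

static struct insn *pass_filter;        /* the installed program */
static int pass_len;

static int set_filter(const struct insn *uprog, int len)
{
    struct insn *code = malloc(len * sizeof(*code));
    if (!code)
        return -ENOMEM;
    memcpy(code, uprog, len * sizeof(*code)); /* copy_from_user() stand-in */

    int err = chk_filter(code, len);
    if (err) {
        free(code);                     /* nothing half-installed */
        return err;
    }
    free(pass_filter);                  /* replace any previous program */
    pass_filter = code;
    pass_len = len;
    return 0;
}

int main(void)
{
    struct insn good[] = { { OP_SKIP }, { OP_ACCEPT } };
    struct insn bad[]  = { { 0xff } };

    printf("good: %d\n", set_filter(good, 2));
    printf("bad:  %d\n", set_filter(bad, 1));
    free(pass_filter);
    return 0;
}
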
45523diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45524index 3c5f249..5fac4d0 100644
45525--- a/drivers/isdn/i4l/isdn_tty.c
45526+++ b/drivers/isdn/i4l/isdn_tty.c
45527@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45528
45529 #ifdef ISDN_DEBUG_MODEM_OPEN
45530 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45531- port->count);
45532+ atomic_read(&port->count));
45533 #endif
45534- port->count++;
45535+ atomic_inc(&port->count);
45536 port->tty = tty;
45537 /*
45538 * Start up serial port
45539@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45540 #endif
45541 return;
45542 }
45543- if ((tty->count == 1) && (port->count != 1)) {
45544+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45545 /*
45546 * Uh, oh. tty->count is 1, which means that the tty
45547 * structure will be freed. Info->count should always
45548@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45549 * serial port won't be shutdown.
45550 */
45551 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45552- "info->count is %d\n", port->count);
45553- port->count = 1;
45554+ "info->count is %d\n", atomic_read(&port->count));
45555+ atomic_set(&port->count, 1);
45556 }
45557- if (--port->count < 0) {
45558+ if (atomic_dec_return(&port->count) < 0) {
45559 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45560- info->line, port->count);
45561- port->count = 0;
45562+ info->line, atomic_read(&port->count));
45563+ atomic_set(&port->count, 0);
45564 }
45565- if (port->count) {
45566+ if (atomic_read(&port->count)) {
45567 #ifdef ISDN_DEBUG_MODEM_OPEN
45568 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45569 #endif
45570@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45571 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45572 return;
45573 isdn_tty_shutdown(info);
45574- port->count = 0;
45575+ atomic_set(&port->count, 0);
45576 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45577 port->tty = NULL;
45578 wake_up_interruptible(&port->open_wait);
45579@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45580 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45581 modem_info *info = &dev->mdm.info[i];
45582
45583- if (info->port.count == 0)
45584+ if (atomic_read(&info->port.count) == 0)
45585 continue;
45586 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45587 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
45588diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45589index e2d4e58..40cd045 100644
45590--- a/drivers/isdn/i4l/isdn_x25iface.c
45591+++ b/drivers/isdn/i4l/isdn_x25iface.c
45592@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45593
45594
45595 static struct concap_proto_ops ix25_pops = {
45596- &isdn_x25iface_proto_new,
45597- &isdn_x25iface_proto_del,
45598- &isdn_x25iface_proto_restart,
45599- &isdn_x25iface_proto_close,
45600- &isdn_x25iface_xmit,
45601- &isdn_x25iface_receive,
45602- &isdn_x25iface_connect_ind,
45603- &isdn_x25iface_disconn_ind
45604+ .proto_new = &isdn_x25iface_proto_new,
45605+ .proto_del = &isdn_x25iface_proto_del,
45606+ .restart = &isdn_x25iface_proto_restart,
45607+ .close = &isdn_x25iface_proto_close,
45608+ .encap_and_xmit = &isdn_x25iface_xmit,
45609+ .data_ind = &isdn_x25iface_receive,
45610+ .connect_ind = &isdn_x25iface_connect_ind,
45611+ .disconn_ind = &isdn_x25iface_disconn_ind
45612 };
45613
45614 /* error message helper function */
45615diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45616index 6a7447c..cae33fe 100644
45617--- a/drivers/isdn/icn/icn.c
45618+++ b/drivers/isdn/icn/icn.c
45619@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45620 if (count > len)
45621 count = len;
45622 if (user) {
45623- if (copy_from_user(msg, buf, count))
45624+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45625 return -EFAULT;
45626 } else
45627 memcpy(msg, buf, count);
45628diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45629index a4f05c5..1433bc5 100644
45630--- a/drivers/isdn/mISDN/dsp_cmx.c
45631+++ b/drivers/isdn/mISDN/dsp_cmx.c
45632@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45633 static u16 dsp_count; /* last sample count */
45634 static int dsp_count_valid; /* if we have last sample count */
45635
45636-void
45637+void __intentional_overflow(-1)
45638 dsp_cmx_send(void *arg)
45639 {
45640 struct dsp_conf *conf;
45641diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45642index f58a354..fbae176 100644
45643--- a/drivers/leds/leds-clevo-mail.c
45644+++ b/drivers/leds/leds-clevo-mail.c
45645@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45646 * detected as working, but in reality it is not) as low as
45647 * possible.
45648 */
45649-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45650+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45651 {
45652 .callback = clevo_mail_led_dmi_callback,
45653 .ident = "Clevo D410J",
45654diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45655index 2eb3ef6..295891f 100644
45656--- a/drivers/leds/leds-ss4200.c
45657+++ b/drivers/leds/leds-ss4200.c
45658@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45659 * detected as working, but in reality it is not) as low as
45660 * possible.
45661 */
45662-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45663+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45664 {
45665 .callback = ss4200_led_dmi_callback,
45666 .ident = "Intel SS4200-E",
45667diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45668index 0bf1e4e..b4bf44e 100644
45669--- a/drivers/lguest/core.c
45670+++ b/drivers/lguest/core.c
45671@@ -97,9 +97,17 @@ static __init int map_switcher(void)
45672 * The end address needs +1 because __get_vm_area allocates an
45673 * extra guard page, so we need space for that.
45674 */
45675+
45676+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45677+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45678+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45679+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45680+#else
45681 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45682 VM_ALLOC, switcher_addr, switcher_addr
45683 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45684+#endif
45685+
45686 if (!switcher_vma) {
45687 err = -ENOMEM;
45688 printk("lguest: could not map switcher pages high\n");
45689@@ -124,7 +132,7 @@ static __init int map_switcher(void)
45690 * Now the Switcher is mapped at the right address, we can't fail!
45691 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45692 */
45693- memcpy(switcher_vma->addr, start_switcher_text,
45694+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45695 end_switcher_text - start_switcher_text);
45696
45697 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45698diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45699index e8b55c3..3514c37 100644
45700--- a/drivers/lguest/page_tables.c
45701+++ b/drivers/lguest/page_tables.c
45702@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45703 /*:*/
45704
45705 #ifdef CONFIG_X86_PAE
45706-static void release_pmd(pmd_t *spmd)
45707+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45708 {
45709 /* If the entry's not present, there's nothing to release. */
45710 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45711diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45712index 922a1ac..9dd0c2a 100644
45713--- a/drivers/lguest/x86/core.c
45714+++ b/drivers/lguest/x86/core.c
45715@@ -59,7 +59,7 @@ static struct {
45716 /* Offset from where switcher.S was compiled to where we've copied it */
45717 static unsigned long switcher_offset(void)
45718 {
45719- return switcher_addr - (unsigned long)start_switcher_text;
45720+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45721 }
45722
45723 /* This cpu's struct lguest_pages (after the Switcher text page) */
45724@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45725 * These copies are pretty cheap, so we do them unconditionally: */
45726 /* Save the current Host top-level page directory.
45727 */
45728+
45729+#ifdef CONFIG_PAX_PER_CPU_PGD
45730+ pages->state.host_cr3 = read_cr3();
45731+#else
45732 pages->state.host_cr3 = __pa(current->mm->pgd);
45733+#endif
45734+
45735 /*
45736 * Set up the Guest's page tables to see this CPU's pages (and no
45737 * other CPU's pages).
45738@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
45739 * compiled-in switcher code and the high-mapped copy we just made.
45740 */
45741 for (i = 0; i < IDT_ENTRIES; i++)
45742- default_idt_entries[i] += switcher_offset();
45743+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45744
45745 /*
45746 * Set up the Switcher's per-cpu areas.
45747@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
45748 * it will be undisturbed when we switch. To change %cs and jump we
45749 * need this structure to feed to Intel's "lcall" instruction.
45750 */
45751- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45752+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45753 lguest_entry.segment = LGUEST_CS;
45754
45755 /*
45756diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45757index 40634b0..4f5855e 100644
45758--- a/drivers/lguest/x86/switcher_32.S
45759+++ b/drivers/lguest/x86/switcher_32.S
45760@@ -87,6 +87,7 @@
45761 #include <asm/page.h>
45762 #include <asm/segment.h>
45763 #include <asm/lguest.h>
45764+#include <asm/processor-flags.h>
45765
45766 // We mark the start of the code to copy
45767 // It's placed in .text tho it's never run here
45768@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45769 // Changes type when we load it: damn Intel!
45770 // For after we switch over our page tables
45771 // That entry will be read-only: we'd crash.
45772+
45773+#ifdef CONFIG_PAX_KERNEXEC
45774+ mov %cr0, %edx
45775+ xor $X86_CR0_WP, %edx
45776+ mov %edx, %cr0
45777+#endif
45778+
45779 movl $(GDT_ENTRY_TSS*8), %edx
45780 ltr %dx
45781
45782@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45783 // Let's clear it again for our return.
45784 // The GDT descriptor of the Host
45785 // Points to the table after two "size" bytes
45786- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45787+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45788 // Clear "used" from type field (byte 5, bit 2)
45789- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45790+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45791+
45792+#ifdef CONFIG_PAX_KERNEXEC
45793+ mov %cr0, %eax
45794+ xor $X86_CR0_WP, %eax
45795+ mov %eax, %cr0
45796+#endif
45797
45798 // Once our page table's switched, the Guest is live!
45799 // The Host fades as we run this final step.
45800@@ -295,13 +309,12 @@ deliver_to_host:
45801 // I consulted gcc, and it gave
45802 // These instructions, which I gladly credit:
45803 leal (%edx,%ebx,8), %eax
45804- movzwl (%eax),%edx
45805- movl 4(%eax), %eax
45806- xorw %ax, %ax
45807- orl %eax, %edx
45808+ movl 4(%eax), %edx
45809+ movw (%eax), %dx
45810 // Now the address of the handler's in %edx
45811 // We call it now: its "iret" drops us home.
45812- jmp *%edx
45813+ ljmp $__KERNEL_CS, $1f
45814+1: jmp *%edx
45815
45816 // Every interrupt can come to us here
45817 // But we must truly tell each apart.
45818diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45819index a08e3ee..df8ade2 100644
45820--- a/drivers/md/bcache/closure.h
45821+++ b/drivers/md/bcache/closure.h
45822@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45823 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45824 struct workqueue_struct *wq)
45825 {
45826- BUG_ON(object_is_on_stack(cl));
45827+ BUG_ON(object_starts_on_stack(cl));
45828 closure_set_ip(cl);
45829 cl->fn = fn;
45830 cl->wq = wq;
45831diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45832index 67f8b31..9418f2b 100644
45833--- a/drivers/md/bitmap.c
45834+++ b/drivers/md/bitmap.c
45835@@ -1775,7 +1775,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45836 chunk_kb ? "KB" : "B");
45837 if (bitmap->storage.file) {
45838 seq_printf(seq, ", file: ");
45839- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45840+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45841 }
45842
45843 seq_printf(seq, "\n");
45844diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45845index 5152142..623d141 100644
45846--- a/drivers/md/dm-ioctl.c
45847+++ b/drivers/md/dm-ioctl.c
45848@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45849 cmd == DM_LIST_VERSIONS_CMD)
45850 return 0;
45851
45852- if ((cmd == DM_DEV_CREATE_CMD)) {
45853+ if (cmd == DM_DEV_CREATE_CMD) {
45854 if (!*param->name) {
45855 DMWARN("name not supplied when creating device");
45856 return -EINVAL;
45857diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45858index 7dfdb5c..4caada6 100644
45859--- a/drivers/md/dm-raid1.c
45860+++ b/drivers/md/dm-raid1.c
45861@@ -40,7 +40,7 @@ enum dm_raid1_error {
45862
45863 struct mirror {
45864 struct mirror_set *ms;
45865- atomic_t error_count;
45866+ atomic_unchecked_t error_count;
45867 unsigned long error_type;
45868 struct dm_dev *dev;
45869 sector_t offset;
45870@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45871 struct mirror *m;
45872
45873 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45874- if (!atomic_read(&m->error_count))
45875+ if (!atomic_read_unchecked(&m->error_count))
45876 return m;
45877
45878 return NULL;
45879@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45880 * simple way to tell if a device has encountered
45881 * errors.
45882 */
45883- atomic_inc(&m->error_count);
45884+ atomic_inc_unchecked(&m->error_count);
45885
45886 if (test_and_set_bit(error_type, &m->error_type))
45887 return;
45888@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45889 struct mirror *m = get_default_mirror(ms);
45890
45891 do {
45892- if (likely(!atomic_read(&m->error_count)))
45893+ if (likely(!atomic_read_unchecked(&m->error_count)))
45894 return m;
45895
45896 if (m-- == ms->mirror)
45897@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45898 {
45899 struct mirror *default_mirror = get_default_mirror(m->ms);
45900
45901- return !atomic_read(&default_mirror->error_count);
45902+ return !atomic_read_unchecked(&default_mirror->error_count);
45903 }
45904
45905 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45906@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45907 */
45908 if (likely(region_in_sync(ms, region, 1)))
45909 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45910- else if (m && atomic_read(&m->error_count))
45911+ else if (m && atomic_read_unchecked(&m->error_count))
45912 m = NULL;
45913
45914 if (likely(m))
45915@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45916 }
45917
45918 ms->mirror[mirror].ms = ms;
45919- atomic_set(&(ms->mirror[mirror].error_count), 0);
45920+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45921 ms->mirror[mirror].error_type = 0;
45922 ms->mirror[mirror].offset = offset;
45923
45924@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
45925 */
45926 static char device_status_char(struct mirror *m)
45927 {
45928- if (!atomic_read(&(m->error_count)))
45929+ if (!atomic_read_unchecked(&(m->error_count)))
45930 return 'A';
45931
45932 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45933diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45934index 28a9012..9c0f6a5 100644
45935--- a/drivers/md/dm-stats.c
45936+++ b/drivers/md/dm-stats.c
45937@@ -382,7 +382,7 @@ do_sync_free:
45938 synchronize_rcu_expedited();
45939 dm_stat_free(&s->rcu_head);
45940 } else {
45941- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45942+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45943 call_rcu(&s->rcu_head, dm_stat_free);
45944 }
45945 return 0;
45946@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45947 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45948 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45949 ));
45950- ACCESS_ONCE(last->last_sector) = end_sector;
45951- ACCESS_ONCE(last->last_rw) = bi_rw;
45952+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45953+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45954 }
45955
45956 rcu_read_lock();
45957diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45958index d1600d2..4c3af3a 100644
45959--- a/drivers/md/dm-stripe.c
45960+++ b/drivers/md/dm-stripe.c
45961@@ -21,7 +21,7 @@ struct stripe {
45962 struct dm_dev *dev;
45963 sector_t physical_start;
45964
45965- atomic_t error_count;
45966+ atomic_unchecked_t error_count;
45967 };
45968
45969 struct stripe_c {
45970@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45971 kfree(sc);
45972 return r;
45973 }
45974- atomic_set(&(sc->stripe[i].error_count), 0);
45975+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45976 }
45977
45978 ti->private = sc;
45979@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45980 DMEMIT("%d ", sc->stripes);
45981 for (i = 0; i < sc->stripes; i++) {
45982 DMEMIT("%s ", sc->stripe[i].dev->name);
45983- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45984+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45985 'D' : 'A';
45986 }
45987 buffer[i] = '\0';
45988@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45989 */
45990 for (i = 0; i < sc->stripes; i++)
45991 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45992- atomic_inc(&(sc->stripe[i].error_count));
45993- if (atomic_read(&(sc->stripe[i].error_count)) <
45994+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45995+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45996 DM_IO_ERROR_THRESHOLD)
45997 schedule_work(&sc->trigger_event);
45998 }
45999diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
46000index 5f59f1e..01bd02e 100644
46001--- a/drivers/md/dm-table.c
46002+++ b/drivers/md/dm-table.c
46003@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
46004 static int open_dev(struct dm_dev_internal *d, dev_t dev,
46005 struct mapped_device *md)
46006 {
46007- static char *_claim_ptr = "I belong to device-mapper";
46008+ static char _claim_ptr[] = "I belong to device-mapper";
46009 struct block_device *bdev;
46010
46011 int r;
46012@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
46013 if (!dev_size)
46014 return 0;
46015
46016- if ((start >= dev_size) || (start + len > dev_size)) {
46017+ if ((start >= dev_size) || (len > dev_size - start)) {
46018 DMWARN("%s: %s too small for target: "
46019 "start=%llu, len=%llu, dev_size=%llu",
46020 dm_device_name(ti->table->md), bdevname(bdev, b),
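
The device_area_is_invalid() hunk rewrites start + len > dev_size as len > dev_size - start: with unsigned sector arithmetic the addition can wrap and make an out-of-range area look valid, while the subtraction form cannot wrap once start < dev_size is established. A compilable demonstration of the difference, with uint64_t standing in for sector_t:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Original form: start + len may wrap around and slip past the bound. */
static bool range_invalid_overflowing(uint64_t start, uint64_t len,
                                      uint64_t size)
{
    return start >= size || start + len > size;
}

/* Patched form: no addition, so no wraparound is possible. */
static bool range_invalid_safe(uint64_t start, uint64_t len, uint64_t size)
{
    return start >= size || len > size - start;
}

int main(void)
{
    uint64_t size = 1000, start = 500;
    uint64_t len = UINT64_MAX - 100;    /* start + len wraps to 400 */

    printf("overflowing check says invalid: %d\n",
           range_invalid_overflowing(start, len, size)); /* 0: fooled */
    printf("safe check says invalid:        %d\n",
           range_invalid_safe(start, len, size));        /* 1: correct */
    return 0;
}
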
46021diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
46022index e9d33ad..dae9880d 100644
46023--- a/drivers/md/dm-thin-metadata.c
46024+++ b/drivers/md/dm-thin-metadata.c
46025@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
46026 {
46027 pmd->info.tm = pmd->tm;
46028 pmd->info.levels = 2;
46029- pmd->info.value_type.context = pmd->data_sm;
46030+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
46031 pmd->info.value_type.size = sizeof(__le64);
46032 pmd->info.value_type.inc = data_block_inc;
46033 pmd->info.value_type.dec = data_block_dec;
46034@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
46035
46036 pmd->bl_info.tm = pmd->tm;
46037 pmd->bl_info.levels = 1;
46038- pmd->bl_info.value_type.context = pmd->data_sm;
46039+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
46040 pmd->bl_info.value_type.size = sizeof(__le64);
46041 pmd->bl_info.value_type.inc = data_block_inc;
46042 pmd->bl_info.value_type.dec = data_block_dec;
46043diff --git a/drivers/md/dm.c b/drivers/md/dm.c
46044index 32b958d..34011e8 100644
46045--- a/drivers/md/dm.c
46046+++ b/drivers/md/dm.c
46047@@ -180,9 +180,9 @@ struct mapped_device {
46048 /*
46049 * Event handling.
46050 */
46051- atomic_t event_nr;
46052+ atomic_unchecked_t event_nr;
46053 wait_queue_head_t eventq;
46054- atomic_t uevent_seq;
46055+ atomic_unchecked_t uevent_seq;
46056 struct list_head uevent_list;
46057 spinlock_t uevent_lock; /* Protect access to uevent_list */
46058
46059@@ -1952,8 +1952,8 @@ static struct mapped_device *alloc_dev(int minor)
46060 spin_lock_init(&md->deferred_lock);
46061 atomic_set(&md->holders, 1);
46062 atomic_set(&md->open_count, 0);
46063- atomic_set(&md->event_nr, 0);
46064- atomic_set(&md->uevent_seq, 0);
46065+ atomic_set_unchecked(&md->event_nr, 0);
46066+ atomic_set_unchecked(&md->uevent_seq, 0);
46067 INIT_LIST_HEAD(&md->uevent_list);
46068 spin_lock_init(&md->uevent_lock);
46069
46070@@ -2107,7 +2107,7 @@ static void event_callback(void *context)
46071
46072 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
46073
46074- atomic_inc(&md->event_nr);
46075+ atomic_inc_unchecked(&md->event_nr);
46076 wake_up(&md->eventq);
46077 }
46078
46079@@ -2800,18 +2800,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
46080
46081 uint32_t dm_next_uevent_seq(struct mapped_device *md)
46082 {
46083- return atomic_add_return(1, &md->uevent_seq);
46084+ return atomic_add_return_unchecked(1, &md->uevent_seq);
46085 }
46086
46087 uint32_t dm_get_event_nr(struct mapped_device *md)
46088 {
46089- return atomic_read(&md->event_nr);
46090+ return atomic_read_unchecked(&md->event_nr);
46091 }
46092
46093 int dm_wait_event(struct mapped_device *md, int event_nr)
46094 {
46095 return wait_event_interruptible(md->eventq,
46096- (event_nr != atomic_read(&md->event_nr)));
46097+ (event_nr != atomic_read_unchecked(&md->event_nr)));
46098 }
46099
46100 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
46101diff --git a/drivers/md/md.c b/drivers/md/md.c
46102index 32fc19c..cb6eba3 100644
46103--- a/drivers/md/md.c
46104+++ b/drivers/md/md.c
46105@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
46106 * start build, activate spare
46107 */
46108 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
46109-static atomic_t md_event_count;
46110+static atomic_unchecked_t md_event_count;
46111 void md_new_event(struct mddev *mddev)
46112 {
46113- atomic_inc(&md_event_count);
46114+ atomic_inc_unchecked(&md_event_count);
46115 wake_up(&md_event_waiters);
46116 }
46117 EXPORT_SYMBOL_GPL(md_new_event);
46118@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
46119 */
46120 static void md_new_event_inintr(struct mddev *mddev)
46121 {
46122- atomic_inc(&md_event_count);
46123+ atomic_inc_unchecked(&md_event_count);
46124 wake_up(&md_event_waiters);
46125 }
46126
46127@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
46128 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
46129 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
46130 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
46131- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
46132+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
46133
46134 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
46135 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
46136@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
46137 else
46138 sb->resync_offset = cpu_to_le64(0);
46139
46140- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
46141+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
46142
46143 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
46144 sb->size = cpu_to_le64(mddev->dev_sectors);
46145@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
46146 static ssize_t
46147 errors_show(struct md_rdev *rdev, char *page)
46148 {
46149- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
46150+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
46151 }
46152
46153 static ssize_t
46154@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
46155 char *e;
46156 unsigned long n = simple_strtoul(buf, &e, 10);
46157 if (*buf && (*e == 0 || *e == '\n')) {
46158- atomic_set(&rdev->corrected_errors, n);
46159+ atomic_set_unchecked(&rdev->corrected_errors, n);
46160 return len;
46161 }
46162 return -EINVAL;
46163@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
46164 rdev->sb_loaded = 0;
46165 rdev->bb_page = NULL;
46166 atomic_set(&rdev->nr_pending, 0);
46167- atomic_set(&rdev->read_errors, 0);
46168- atomic_set(&rdev->corrected_errors, 0);
46169+ atomic_set_unchecked(&rdev->read_errors, 0);
46170+ atomic_set_unchecked(&rdev->corrected_errors, 0);
46171
46172 INIT_LIST_HEAD(&rdev->same_set);
46173 init_waitqueue_head(&rdev->blocked_wait);
46174@@ -7068,7 +7068,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
46175
46176 spin_unlock(&pers_lock);
46177 seq_printf(seq, "\n");
46178- seq->poll_event = atomic_read(&md_event_count);
46179+ seq->poll_event = atomic_read_unchecked(&md_event_count);
46180 return 0;
46181 }
46182 if (v == (void*)2) {
46183@@ -7171,7 +7171,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
46184 return error;
46185
46186 seq = file->private_data;
46187- seq->poll_event = atomic_read(&md_event_count);
46188+ seq->poll_event = atomic_read_unchecked(&md_event_count);
46189 return error;
46190 }
46191
46192@@ -7188,7 +7188,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
46193 /* always allow read */
46194 mask = POLLIN | POLLRDNORM;
46195
46196- if (seq->poll_event != atomic_read(&md_event_count))
46197+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
46198 mask |= POLLERR | POLLPRI;
46199 return mask;
46200 }
46201@@ -7232,7 +7232,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
46202 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
46203 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
46204 (int)part_stat_read(&disk->part0, sectors[1]) -
46205- atomic_read(&disk->sync_io);
46206+ atomic_read_unchecked(&disk->sync_io);
46207 /* sync IO will cause sync_io to increase before the disk_stats
46208 * as sync_io is counted when a request starts, and
46209 * disk_stats is counted when it completes.
46210diff --git a/drivers/md/md.h b/drivers/md/md.h
46211index a49d991..3582bb7 100644
46212--- a/drivers/md/md.h
46213+++ b/drivers/md/md.h
46214@@ -94,13 +94,13 @@ struct md_rdev {
46215 * only maintained for arrays that
46216 * support hot removal
46217 */
46218- atomic_t read_errors; /* number of consecutive read errors that
46219+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
46220 * we have tried to ignore.
46221 */
46222 struct timespec last_read_error; /* monotonic time since our
46223 * last read error
46224 */
46225- atomic_t corrected_errors; /* number of corrected read errors,
46226+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
46227 * for reporting to userspace and storing
46228 * in superblock.
46229 */
46230@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
46231
46232 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
46233 {
46234- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
46235+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
46236 }
46237
46238 struct md_personality
46239diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
46240index 786b689..ea8c956 100644
46241--- a/drivers/md/persistent-data/dm-space-map-metadata.c
46242+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
46243@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
46244 * Flick into a mode where all blocks get allocated in the new area.
46245 */
46246 smm->begin = old_len;
46247- memcpy(sm, &bootstrap_ops, sizeof(*sm));
46248+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
46249
46250 /*
46251 * Extend.
46252@@ -710,7 +710,7 @@ out:
46253 /*
46254 * Switch back to normal behaviour.
46255 */
46256- memcpy(sm, &ops, sizeof(*sm));
46257+ memcpy((void *)sm, &ops, sizeof(*sm));
46258 return r;
46259 }
46260
46261diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
46262index 3e6d115..ffecdeb 100644
46263--- a/drivers/md/persistent-data/dm-space-map.h
46264+++ b/drivers/md/persistent-data/dm-space-map.h
46265@@ -71,6 +71,7 @@ struct dm_space_map {
46266 dm_sm_threshold_fn fn,
46267 void *context);
46268 };
46269+typedef struct dm_space_map __no_const dm_space_map_no_const;
46270
46271 /*----------------------------------------------------------------*/
46272
46273diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
46274index 56e24c0..e1c8e1f 100644
46275--- a/drivers/md/raid1.c
46276+++ b/drivers/md/raid1.c
46277@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
46278 if (r1_sync_page_io(rdev, sect, s,
46279 bio->bi_io_vec[idx].bv_page,
46280 READ) != 0)
46281- atomic_add(s, &rdev->corrected_errors);
46282+ atomic_add_unchecked(s, &rdev->corrected_errors);
46283 }
46284 sectors -= s;
46285 sect += s;
46286@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
46287 test_bit(In_sync, &rdev->flags)) {
46288 if (r1_sync_page_io(rdev, sect, s,
46289 conf->tmppage, READ)) {
46290- atomic_add(s, &rdev->corrected_errors);
46291+ atomic_add_unchecked(s, &rdev->corrected_errors);
46292 printk(KERN_INFO
46293 "md/raid1:%s: read error corrected "
46294 "(%d sectors at %llu on %s)\n",
46295diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
46296index cb882aa..cb8aeca 100644
46297--- a/drivers/md/raid10.c
46298+++ b/drivers/md/raid10.c
46299@@ -1949,7 +1949,7 @@ static void end_sync_read(struct bio *bio, int error)
46300 /* The write handler will notice the lack of
46301 * R10BIO_Uptodate and record any errors etc
46302 */
46303- atomic_add(r10_bio->sectors,
46304+ atomic_add_unchecked(r10_bio->sectors,
46305 &conf->mirrors[d].rdev->corrected_errors);
46306
46307 /* for reconstruct, we always reschedule after a read.
46308@@ -2307,7 +2307,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
46309 {
46310 struct timespec cur_time_mon;
46311 unsigned long hours_since_last;
46312- unsigned int read_errors = atomic_read(&rdev->read_errors);
46313+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
46314
46315 ktime_get_ts(&cur_time_mon);
46316
46317@@ -2329,9 +2329,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
46318 * overflowing the shift of read_errors by hours_since_last.
46319 */
46320 if (hours_since_last >= 8 * sizeof(read_errors))
46321- atomic_set(&rdev->read_errors, 0);
46322+ atomic_set_unchecked(&rdev->read_errors, 0);
46323 else
46324- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
46325+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
46326 }
46327
46328 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
46329@@ -2385,8 +2385,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46330 return;
46331
46332 check_decay_read_errors(mddev, rdev);
46333- atomic_inc(&rdev->read_errors);
46334- if (atomic_read(&rdev->read_errors) > max_read_errors) {
46335+ atomic_inc_unchecked(&rdev->read_errors);
46336+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
46337 char b[BDEVNAME_SIZE];
46338 bdevname(rdev->bdev, b);
46339
46340@@ -2394,7 +2394,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46341 "md/raid10:%s: %s: Raid device exceeded "
46342 "read_error threshold [cur %d:max %d]\n",
46343 mdname(mddev), b,
46344- atomic_read(&rdev->read_errors), max_read_errors);
46345+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
46346 printk(KERN_NOTICE
46347 "md/raid10:%s: %s: Failing raid device\n",
46348 mdname(mddev), b);
46349@@ -2549,7 +2549,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46350 sect +
46351 choose_data_offset(r10_bio, rdev)),
46352 bdevname(rdev->bdev, b));
46353- atomic_add(s, &rdev->corrected_errors);
46354+ atomic_add_unchecked(s, &rdev->corrected_errors);
46355 }
46356
46357 rdev_dec_pending(rdev, mddev);
46358@@ -2954,6 +2954,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
46359 */
46360 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
46361 end_reshape(conf);
46362+ close_sync(conf);
46363 return 0;
46364 }
46365
46366@@ -4411,7 +4412,7 @@ read_more:
46367 read_bio->bi_private = r10_bio;
46368 read_bio->bi_end_io = end_sync_read;
46369 read_bio->bi_rw = READ;
46370- read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
46371+ read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
46372 read_bio->bi_flags |= 1 << BIO_UPTODATE;
46373 read_bio->bi_vcnt = 0;
46374 read_bio->bi_iter.bi_size = 0;
46375diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
46376index 6234b2e..4990801 100644
46377--- a/drivers/md/raid5.c
46378+++ b/drivers/md/raid5.c
46379@@ -1731,6 +1731,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
46380 return 1;
46381 }
46382
46383+#ifdef CONFIG_GRKERNSEC_HIDESYM
46384+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
46385+#endif
46386+
46387 static int grow_stripes(struct r5conf *conf, int num)
46388 {
46389 struct kmem_cache *sc;
46390@@ -1742,7 +1746,11 @@ static int grow_stripes(struct r5conf *conf, int num)
46391 "raid%d-%s", conf->level, mdname(conf->mddev));
46392 else
46393 sprintf(conf->cache_name[0],
46394+#ifdef CONFIG_GRKERNSEC_HIDESYM
46395+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
46396+#else
46397 "raid%d-%p", conf->level, conf->mddev);
46398+#endif
46399 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
46400
46401 conf->active_name = 0;
46402@@ -2018,21 +2026,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
46403 mdname(conf->mddev), STRIPE_SECTORS,
46404 (unsigned long long)s,
46405 bdevname(rdev->bdev, b));
46406- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
46407+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
46408 clear_bit(R5_ReadError, &sh->dev[i].flags);
46409 clear_bit(R5_ReWrite, &sh->dev[i].flags);
46410 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
46411 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
46412
46413- if (atomic_read(&rdev->read_errors))
46414- atomic_set(&rdev->read_errors, 0);
46415+ if (atomic_read_unchecked(&rdev->read_errors))
46416+ atomic_set_unchecked(&rdev->read_errors, 0);
46417 } else {
46418 const char *bdn = bdevname(rdev->bdev, b);
46419 int retry = 0;
46420 int set_bad = 0;
46421
46422 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
46423- atomic_inc(&rdev->read_errors);
46424+ atomic_inc_unchecked(&rdev->read_errors);
46425 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
46426 printk_ratelimited(
46427 KERN_WARNING
46428@@ -2060,7 +2068,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
46429 mdname(conf->mddev),
46430 (unsigned long long)s,
46431 bdn);
46432- } else if (atomic_read(&rdev->read_errors)
46433+ } else if (atomic_read_unchecked(&rdev->read_errors)
46434 > conf->max_nr_stripes)
46435 printk(KERN_WARNING
46436 "md/raid:%s: Too many read errors, failing device %s.\n",
46437@@ -3817,6 +3825,8 @@ static void handle_stripe(struct stripe_head *sh)
46438 set_bit(R5_Wantwrite, &dev->flags);
46439 if (prexor)
46440 continue;
46441+ if (s.failed > 1)
46442+ continue;
46443 if (!test_bit(R5_Insync, &dev->flags) ||
46444 ((i == sh->pd_idx || i == sh->qd_idx) &&
46445 s.failed == 0))
46446diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46447index 983db75..ef9248c 100644
46448--- a/drivers/media/dvb-core/dvbdev.c
46449+++ b/drivers/media/dvb-core/dvbdev.c
46450@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46451 const struct dvb_device *template, void *priv, int type)
46452 {
46453 struct dvb_device *dvbdev;
46454- struct file_operations *dvbdevfops;
46455+ file_operations_no_const *dvbdevfops;
46456 struct device *clsdev;
46457 int minor;
46458 int id;
46459diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46460index 539f4db..cdd403b 100644
46461--- a/drivers/media/dvb-frontends/af9033.h
46462+++ b/drivers/media/dvb-frontends/af9033.h
46463@@ -82,7 +82,7 @@ struct af9033_ops {
46464 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46465 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46466 int onoff);
46467-};
46468+} __no_const;
46469
46470
46471 #if IS_ENABLED(CONFIG_DVB_AF9033)
46472diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46473index 9b6c3bb..baeb5c7 100644
46474--- a/drivers/media/dvb-frontends/dib3000.h
46475+++ b/drivers/media/dvb-frontends/dib3000.h
46476@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46477 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46478 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46479 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46480-};
46481+} __no_const;
46482
46483 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
46484 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46485diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46486index ed8cb90..5ef7f79 100644
46487--- a/drivers/media/pci/cx88/cx88-video.c
46488+++ b/drivers/media/pci/cx88/cx88-video.c
46489@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46490
46491 /* ------------------------------------------------------------------ */
46492
46493-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46494-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46495-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46496+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46497+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46498+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46499
46500 module_param_array(video_nr, int, NULL, 0444);
46501 module_param_array(vbi_nr, int, NULL, 0444);
46502diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46503index 802642d..5534900 100644
46504--- a/drivers/media/pci/ivtv/ivtv-driver.c
46505+++ b/drivers/media/pci/ivtv/ivtv-driver.c
46506@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46507 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46508
46509 /* ivtv instance counter */
46510-static atomic_t ivtv_instance = ATOMIC_INIT(0);
46511+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46512
46513 /* Parameter declarations */
46514 static int cardtype[IVTV_MAX_CARDS];
46515diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46516index 9a726ea..f5e9b52 100644
46517--- a/drivers/media/platform/omap/omap_vout.c
46518+++ b/drivers/media/platform/omap/omap_vout.c
46519@@ -63,7 +63,6 @@ enum omap_vout_channels {
46520 OMAP_VIDEO2,
46521 };
46522
46523-static struct videobuf_queue_ops video_vbq_ops;
46524 /* Variables configurable through module params*/
46525 static u32 video1_numbuffers = 3;
46526 static u32 video2_numbuffers = 3;
46527@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
46528 {
46529 struct videobuf_queue *q;
46530 struct omap_vout_device *vout = NULL;
46531+ static struct videobuf_queue_ops video_vbq_ops = {
46532+ .buf_setup = omap_vout_buffer_setup,
46533+ .buf_prepare = omap_vout_buffer_prepare,
46534+ .buf_release = omap_vout_buffer_release,
46535+ .buf_queue = omap_vout_buffer_queue,
46536+ };
46537
46538 vout = video_drvdata(file);
46539 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46540@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
46541 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46542
46543 q = &vout->vbq;
46544- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46545- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46546- video_vbq_ops.buf_release = omap_vout_buffer_release;
46547- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46548 spin_lock_init(&vout->vbq_lock);
46549
46550 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
46551diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46552index fb2acc5..a2fcbdc4 100644
46553--- a/drivers/media/platform/s5p-tv/mixer.h
46554+++ b/drivers/media/platform/s5p-tv/mixer.h
46555@@ -156,7 +156,7 @@ struct mxr_layer {
46556 /** layer index (unique identifier) */
46557 int idx;
46558 /** callbacks for layer methods */
46559- struct mxr_layer_ops ops;
46560+ struct mxr_layer_ops *ops;
46561 /** format array */
46562 const struct mxr_format **fmt_array;
46563 /** size of format array */
46564diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46565index 74344c7..a39e70e 100644
46566--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46567+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46568@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46569 {
46570 struct mxr_layer *layer;
46571 int ret;
46572- struct mxr_layer_ops ops = {
46573+ static struct mxr_layer_ops ops = {
46574 .release = mxr_graph_layer_release,
46575 .buffer_set = mxr_graph_buffer_set,
46576 .stream_set = mxr_graph_stream_set,
46577diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46578index b713403..53cb5ad 100644
46579--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46580+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46581@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46582 layer->update_buf = next;
46583 }
46584
46585- layer->ops.buffer_set(layer, layer->update_buf);
46586+ layer->ops->buffer_set(layer, layer->update_buf);
46587
46588 if (done && done != layer->shadow_buf)
46589 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46590diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46591index 8a8dbc8..b74c62d 100644
46592--- a/drivers/media/platform/s5p-tv/mixer_video.c
46593+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46594@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46595 layer->geo.src.height = layer->geo.src.full_height;
46596
46597 mxr_geometry_dump(mdev, &layer->geo);
46598- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46599+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46600 mxr_geometry_dump(mdev, &layer->geo);
46601 }
46602
46603@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46604 layer->geo.dst.full_width = mbus_fmt.width;
46605 layer->geo.dst.full_height = mbus_fmt.height;
46606 layer->geo.dst.field = mbus_fmt.field;
46607- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46608+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46609
46610 mxr_geometry_dump(mdev, &layer->geo);
46611 }
46612@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46613 /* set source size to highest accepted value */
46614 geo->src.full_width = max(geo->dst.full_width, pix->width);
46615 geo->src.full_height = max(geo->dst.full_height, pix->height);
46616- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46617+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46618 mxr_geometry_dump(mdev, &layer->geo);
46619 /* set cropping to total visible screen */
46620 geo->src.width = pix->width;
46621@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46622 geo->src.x_offset = 0;
46623 geo->src.y_offset = 0;
46624 /* assure consistency of geometry */
46625- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46626+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46627 mxr_geometry_dump(mdev, &layer->geo);
46628 /* set full size to lowest possible value */
46629 geo->src.full_width = 0;
46630 geo->src.full_height = 0;
46631- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46632+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46633 mxr_geometry_dump(mdev, &layer->geo);
46634
46635 /* returning results */
46636@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46637 target->width = s->r.width;
46638 target->height = s->r.height;
46639
46640- layer->ops.fix_geometry(layer, stage, s->flags);
46641+ layer->ops->fix_geometry(layer, stage, s->flags);
46642
46643 /* retrieve update selection rectangle */
46644 res.left = target->x_offset;
46645@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46646 mxr_output_get(mdev);
46647
46648 mxr_layer_update_output(layer);
46649- layer->ops.format_set(layer);
46650+ layer->ops->format_set(layer);
46651 /* enabling layer in hardware */
46652 spin_lock_irqsave(&layer->enq_slock, flags);
46653 layer->state = MXR_LAYER_STREAMING;
46654 spin_unlock_irqrestore(&layer->enq_slock, flags);
46655
46656- layer->ops.stream_set(layer, MXR_ENABLE);
46657+ layer->ops->stream_set(layer, MXR_ENABLE);
46658 mxr_streamer_get(mdev);
46659
46660 return 0;
46661@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
46662 spin_unlock_irqrestore(&layer->enq_slock, flags);
46663
46664 /* disabling layer in hardware */
46665- layer->ops.stream_set(layer, MXR_DISABLE);
46666+ layer->ops->stream_set(layer, MXR_DISABLE);
46667 /* remove one streamer */
46668 mxr_streamer_put(mdev);
46669 /* allow changes in output configuration */
46670@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46671
46672 void mxr_layer_release(struct mxr_layer *layer)
46673 {
46674- if (layer->ops.release)
46675- layer->ops.release(layer);
46676+ if (layer->ops->release)
46677+ layer->ops->release(layer);
46678 }
46679
46680 void mxr_base_layer_release(struct mxr_layer *layer)
46681@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46682
46683 layer->mdev = mdev;
46684 layer->idx = idx;
46685- layer->ops = *ops;
46686+ layer->ops = ops;
46687
46688 spin_lock_init(&layer->enq_slock);
46689 INIT_LIST_HEAD(&layer->enq_list);
46690diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46691index c9388c4..ce71ece 100644
46692--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46693+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46694@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46695 {
46696 struct mxr_layer *layer;
46697 int ret;
46698- struct mxr_layer_ops ops = {
46699+ static struct mxr_layer_ops ops = {
46700 .release = mxr_vp_layer_release,
46701 .buffer_set = mxr_vp_buffer_set,
46702 .stream_set = mxr_vp_stream_set,
46703diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
46704index d00bf3d..1301a0c 100644
46705--- a/drivers/media/platform/vivi.c
46706+++ b/drivers/media/platform/vivi.c
46707@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
46708 MODULE_LICENSE("Dual BSD/GPL");
46709 MODULE_VERSION(VIVI_VERSION);
46710
46711-static unsigned video_nr = -1;
46712-module_param(video_nr, uint, 0644);
46713+static int video_nr = -1;
46714+module_param(video_nr, int, 0644);
46715 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
46716
46717 static unsigned n_devs = 1;
46718diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46719index d719e59..63f3470 100644
46720--- a/drivers/media/radio/radio-cadet.c
46721+++ b/drivers/media/radio/radio-cadet.c
46722@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46723 unsigned char readbuf[RDS_BUFFER];
46724 int i = 0;
46725
46726+ if (count > RDS_BUFFER)
46727+ return -EFAULT;
46728 mutex_lock(&dev->lock);
46729 if (dev->rdsstat == 0)
46730 cadet_start_rds(dev);
46731@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46732 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46733 mutex_unlock(&dev->lock);
46734
46735- if (i && copy_to_user(data, readbuf, i))
46736- return -EFAULT;
46737+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46738+ i = -EFAULT;
46739+
46740 return i;
46741 }
46742
46743diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46744index 5236035..c622c74 100644
46745--- a/drivers/media/radio/radio-maxiradio.c
46746+++ b/drivers/media/radio/radio-maxiradio.c
46747@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46748 /* TEA5757 pin mappings */
46749 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46750
46751-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46752+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46753
46754 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46755 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46756diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46757index 050b3bb..79f62b9 100644
46758--- a/drivers/media/radio/radio-shark.c
46759+++ b/drivers/media/radio/radio-shark.c
46760@@ -79,7 +79,7 @@ struct shark_device {
46761 u32 last_val;
46762 };
46763
46764-static atomic_t shark_instance = ATOMIC_INIT(0);
46765+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46766
46767 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46768 {
46769diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46770index 8654e0d..0608a64 100644
46771--- a/drivers/media/radio/radio-shark2.c
46772+++ b/drivers/media/radio/radio-shark2.c
46773@@ -74,7 +74,7 @@ struct shark_device {
46774 u8 *transfer_buffer;
46775 };
46776
46777-static atomic_t shark_instance = ATOMIC_INIT(0);
46778+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46779
46780 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46781 {
46782diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46783index 2fd9009..278cc1e 100644
46784--- a/drivers/media/radio/radio-si476x.c
46785+++ b/drivers/media/radio/radio-si476x.c
46786@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46787 struct si476x_radio *radio;
46788 struct v4l2_ctrl *ctrl;
46789
46790- static atomic_t instance = ATOMIC_INIT(0);
46791+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46792
46793 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46794 if (!radio)
46795diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
46796index a1c641e..3007da9 100644
46797--- a/drivers/media/usb/dvb-usb/cxusb.c
46798+++ b/drivers/media/usb/dvb-usb/cxusb.c
46799@@ -1112,7 +1112,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
46800
46801 struct dib0700_adapter_state {
46802 int (*set_param_save) (struct dvb_frontend *);
46803-};
46804+} __no_const;
46805
46806 static int dib7070_set_param_override(struct dvb_frontend *fe)
46807 {
46808diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46809index 733a7ff..f8b52e3 100644
46810--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46811+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46812@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46813
46814 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46815 {
46816- struct hexline hx;
46817- u8 reset;
46818+ struct hexline *hx;
46819+ u8 *reset;
46820 int ret,pos=0;
46821
46822+ reset = kmalloc(1, GFP_KERNEL);
46823+ if (reset == NULL)
46824+ return -ENOMEM;
46825+
46826+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46827+ if (hx == NULL) {
46828+ kfree(reset);
46829+ return -ENOMEM;
46830+ }
46831+
46832 /* stop the CPU */
46833- reset = 1;
46834- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46835+ reset[0] = 1;
46836+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46837 err("could not stop the USB controller CPU.");
46838
46839- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46840- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46841- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46842+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46843+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46844+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46845
46846- if (ret != hx.len) {
46847+ if (ret != hx->len) {
46848 err("error while transferring firmware "
46849 "(transferred size: %d, block size: %d)",
46850- ret,hx.len);
46851+ ret,hx->len);
46852 ret = -EINVAL;
46853 break;
46854 }
46855 }
46856 if (ret < 0) {
46857 err("firmware download failed at %d with %d",pos,ret);
46858+ kfree(reset);
46859+ kfree(hx);
46860 return ret;
46861 }
46862
46863 if (ret == 0) {
46864 /* restart the CPU */
46865- reset = 0;
46866- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46867+ reset[0] = 0;
46868+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46869 err("could not restart the USB controller CPU.");
46870 ret = -EINVAL;
46871 }
46872 } else
46873 ret = -EIO;
46874
46875+ kfree(reset);
46876+ kfree(hx);
46877+
46878 return ret;
46879 }
46880 EXPORT_SYMBOL(usb_cypress_load_firmware);
46881diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
46882index ae0f56a..ec71784 100644
46883--- a/drivers/media/usb/dvb-usb/dw2102.c
46884+++ b/drivers/media/usb/dvb-usb/dw2102.c
46885@@ -118,7 +118,7 @@ struct su3000_state {
46886
46887 struct s6x0_state {
46888 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
46889-};
46890+} __no_const;
46891
46892 /* debug */
46893 static int dvb_usb_dw2102_debug;
46894diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
46895index d947e03..87fef42 100644
46896--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
46897+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
46898@@ -87,8 +87,11 @@ struct technisat_usb2_state {
46899 static int technisat_usb2_i2c_access(struct usb_device *udev,
46900 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
46901 {
46902- u8 b[64];
46903- int ret, actual_length;
46904+ u8 *b = kmalloc(64, GFP_KERNEL);
46905+ int ret, actual_length, error = 0;
46906+
46907+ if (b == NULL)
46908+ return -ENOMEM;
46909
46910 deb_i2c("i2c-access: %02x, tx: ", device_addr);
46911 debug_dump(tx, txlen, deb_i2c);
46912@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46913
46914 if (ret < 0) {
46915 err("i2c-error: out failed %02x = %d", device_addr, ret);
46916- return -ENODEV;
46917+ error = -ENODEV;
46918+ goto out;
46919 }
46920
46921 ret = usb_bulk_msg(udev,
46922@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46923 b, 64, &actual_length, 1000);
46924 if (ret < 0) {
46925 err("i2c-error: in failed %02x = %d", device_addr, ret);
46926- return -ENODEV;
46927+ error = -ENODEV;
46928+ goto out;
46929 }
46930
46931 if (b[0] != I2C_STATUS_OK) {
46932@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46933 /* handle tuner-i2c-nak */
46934 if (!(b[0] == I2C_STATUS_NAK &&
46935 device_addr == 0x60
46936- /* && device_is_technisat_usb2 */))
46937- return -ENODEV;
46938+ /* && device_is_technisat_usb2 */)) {
46939+ error = -ENODEV;
46940+ goto out;
46941+ }
46942 }
46943
46944 deb_i2c("status: %d, ", b[0]);
46945@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46946
46947 deb_i2c("\n");
46948
46949- return 0;
46950+out:
46951+ kfree(b);
46952+ return error;
46953 }
46954
46955 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
46956@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46957 {
46958 int ret;
46959
46960- u8 led[8] = {
46961- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46962- 0
46963- };
46964+ u8 *led = kzalloc(8, GFP_KERNEL);
46965+
46966+ if (led == NULL)
46967+ return -ENOMEM;
46968
46969 if (disable_led_control && state != TECH_LED_OFF)
46970 return 0;
46971
46972+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
46973+
46974 switch (state) {
46975 case TECH_LED_ON:
46976 led[1] = 0x82;
46977@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46978 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46979 USB_TYPE_VENDOR | USB_DIR_OUT,
46980 0, 0,
46981- led, sizeof(led), 500);
46982+ led, 8, 500);
46983
46984 mutex_unlock(&d->i2c_mutex);
46985+
46986+ kfree(led);
46987+
46988 return ret;
46989 }
46990
46991 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
46992 {
46993 int ret;
46994- u8 b = 0;
46995+ u8 *b = kzalloc(1, GFP_KERNEL);
46996+
46997+ if (b == NULL)
46998+ return -ENOMEM;
46999
47000 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47001 return -EAGAIN;
47002@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47003 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47004 USB_TYPE_VENDOR | USB_DIR_OUT,
47005 (red << 8) | green, 0,
47006- &b, 1, 500);
47007+ b, 1, 500);
47008
47009 mutex_unlock(&d->i2c_mutex);
47010
47011+ kfree(b);
47012+
47013 return ret;
47014 }
47015
47016@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47017 struct dvb_usb_device_description **desc, int *cold)
47018 {
47019 int ret;
47020- u8 version[3];
47021+ u8 *version = kmalloc(3, GFP_KERNEL);
47022
47023 /* first select the interface */
47024 if (usb_set_interface(udev, 0, 1) != 0)
47025@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47026
47027 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47028
47029+ if (version == NULL)
47030+ return 0;
47031+
47032 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47033 GET_VERSION_INFO_VENDOR_REQUEST,
47034 USB_TYPE_VENDOR | USB_DIR_IN,
47035 0, 0,
47036- version, sizeof(version), 500);
47037+ version, 3, 500);
47038
47039 if (ret < 0)
47040 *cold = 1;
47041@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47042 *cold = 0;
47043 }
47044
47045+ kfree(version);
47046+
47047 return 0;
47048 }
47049
47050@@ -591,10 +615,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47051
47052 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47053 {
47054- u8 buf[62], *b;
47055+ u8 *buf, *b;
47056 int ret;
47057 struct ir_raw_event ev;
47058
47059+ buf = kmalloc(62, GFP_KERNEL);
47060+
47061+ if (buf == NULL)
47062+ return -ENOMEM;
47063+
47064 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47065 buf[1] = 0x08;
47066 buf[2] = 0x8f;
47067@@ -617,16 +646,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47068 GET_IR_DATA_VENDOR_REQUEST,
47069 USB_TYPE_VENDOR | USB_DIR_IN,
47070 0x8080, 0,
47071- buf, sizeof(buf), 500);
47072+ buf, 62, 500);
47073
47074 unlock:
47075 mutex_unlock(&d->i2c_mutex);
47076
47077- if (ret < 0)
47078+ if (ret < 0) {
47079+ kfree(buf);
47080 return ret;
47081+ }
47082
47083- if (ret == 1)
47084+ if (ret == 1) {
47085+ kfree(buf);
47086 return 0; /* no key pressed */
47087+ }
47088
47089 /* decoding */
47090 b = buf+1;
47091@@ -653,6 +686,8 @@ unlock:
47092
47093 ir_raw_event_handle(d->rc_dev);
47094
47095+ kfree(buf);
47096+
47097 return 1;
47098 }
47099
47100diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47101index 7e2411c..cef73ca 100644
47102--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47103+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47104@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
47105 __u32 reserved;
47106 };
47107
47108-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47109+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47110 enum v4l2_memory memory)
47111 {
47112 void __user *up_pln;
47113@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47114 return 0;
47115 }
47116
47117-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47118+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47119 enum v4l2_memory memory)
47120 {
47121 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
47122@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47123 * by passing a very big num_planes value */
47124 uplane = compat_alloc_user_space(num_planes *
47125 sizeof(struct v4l2_plane));
47126- kp->m.planes = uplane;
47127+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
47128
47129 while (--num_planes >= 0) {
47130 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47131@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47132 if (num_planes == 0)
47133 return 0;
47134
47135- uplane = kp->m.planes;
47136+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47137 if (get_user(p, &up->m.planes))
47138 return -EFAULT;
47139 uplane32 = compat_ptr(p);
47140@@ -552,7 +552,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47141 get_user(kp->capability, &up->capability) ||
47142 get_user(kp->flags, &up->flags))
47143 return -EFAULT;
47144- kp->base = compat_ptr(tmp);
47145+ kp->base = (void __force_kernel *)compat_ptr(tmp);
47146 get_v4l2_pix_format(&kp->fmt, &up->fmt);
47147 return 0;
47148 }
47149@@ -658,7 +658,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47150 n * sizeof(struct v4l2_ext_control32)))
47151 return -EFAULT;
47152 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47153- kp->controls = kcontrols;
47154+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
47155 while (--n >= 0) {
47156 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
47157 return -EFAULT;
47158@@ -680,7 +680,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47159 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
47160 {
47161 struct v4l2_ext_control32 __user *ucontrols;
47162- struct v4l2_ext_control __user *kcontrols = kp->controls;
47163+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
47164 int n = kp->count;
47165 compat_caddr_t p;
47166
47167@@ -774,7 +774,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47168 put_user(kp->start_block, &up->start_block) ||
47169 put_user(kp->blocks, &up->blocks) ||
47170 put_user(tmp, &up->edid) ||
47171- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47172+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
47173 return -EFAULT;
47174 return 0;
47175 }
47176diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
47177index 55c6832..a91c7a6 100644
47178--- a/drivers/media/v4l2-core/v4l2-ctrls.c
47179+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
47180@@ -1431,8 +1431,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
47181 return 0;
47182
47183 case V4L2_CTRL_TYPE_STRING:
47184- len = strlen(c->string);
47185- if (len < ctrl->minimum)
47186+ len = strlen_user(c->string);
47187+ if (!len || len < ctrl->minimum)
47188 return -ERANGE;
47189 if ((len - ctrl->minimum) % ctrl->step)
47190 return -ERANGE;
47191diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47192index 015f92a..59e311e 100644
47193--- a/drivers/media/v4l2-core/v4l2-device.c
47194+++ b/drivers/media/v4l2-core/v4l2-device.c
47195@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47196 EXPORT_SYMBOL_GPL(v4l2_device_put);
47197
47198 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47199- atomic_t *instance)
47200+ atomic_unchecked_t *instance)
47201 {
47202- int num = atomic_inc_return(instance) - 1;
47203+ int num = atomic_inc_return_unchecked(instance) - 1;
47204 int len = strlen(basename);
47205
47206 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47207diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47208index 16bffd8..3ab516a 100644
47209--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47210+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47211@@ -2003,7 +2003,8 @@ struct v4l2_ioctl_info {
47212 struct file *file, void *fh, void *p);
47213 } u;
47214 void (*debug)(const void *arg, bool write_only);
47215-};
47216+} __do_const;
47217+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47218
47219 /* This control needs a priority check */
47220 #define INFO_FL_PRIO (1 << 0)
47221@@ -2186,7 +2187,7 @@ static long __video_do_ioctl(struct file *file,
47222 struct video_device *vfd = video_devdata(file);
47223 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47224 bool write_only = false;
47225- struct v4l2_ioctl_info default_info;
47226+ v4l2_ioctl_info_no_const default_info;
47227 const struct v4l2_ioctl_info *info;
47228 void *fh = file->private_data;
47229 struct v4l2_fh *vfh = NULL;
47230@@ -2276,7 +2277,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47231 ret = -EINVAL;
47232 break;
47233 }
47234- *user_ptr = (void __user *)buf->m.planes;
47235+ *user_ptr = (void __force_user *)buf->m.planes;
47236 *kernel_ptr = (void **)&buf->m.planes;
47237 *array_size = sizeof(struct v4l2_plane) * buf->length;
47238 ret = 1;
47239@@ -2293,7 +2294,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47240 ret = -EINVAL;
47241 break;
47242 }
47243- *user_ptr = (void __user *)edid->edid;
47244+ *user_ptr = (void __force_user *)edid->edid;
47245 *kernel_ptr = (void **)&edid->edid;
47246 *array_size = edid->blocks * 128;
47247 ret = 1;
47248@@ -2311,7 +2312,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47249 ret = -EINVAL;
47250 break;
47251 }
47252- *user_ptr = (void __user *)ctrls->controls;
47253+ *user_ptr = (void __force_user *)ctrls->controls;
47254 *kernel_ptr = (void **)&ctrls->controls;
47255 *array_size = sizeof(struct v4l2_ext_control)
47256 * ctrls->count;
47257@@ -2412,7 +2413,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47258 }
47259
47260 if (has_array_args) {
47261- *kernel_ptr = (void __force *)user_ptr;
47262+ *kernel_ptr = (void __force_kernel *)user_ptr;
47263 if (copy_to_user(user_ptr, mbuf, array_size))
47264 err = -EFAULT;
47265 goto out_array_args;
47266diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47267index ebc0af7..baed058 100644
47268--- a/drivers/message/fusion/mptbase.c
47269+++ b/drivers/message/fusion/mptbase.c
47270@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47271 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47272 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47273
47274+#ifdef CONFIG_GRKERNSEC_HIDESYM
47275+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47276+#else
47277 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47278 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47279+#endif
47280+
47281 /*
47282 * Rounding UP to nearest 4-kB boundary here...
47283 */
47284@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47285 ioc->facts.GlobalCredits);
47286
47287 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47288+#ifdef CONFIG_GRKERNSEC_HIDESYM
47289+ NULL, NULL);
47290+#else
47291 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47292+#endif
47293 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47294 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47295 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
47296diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47297index 711fcb5..5da1fb0 100644
47298--- a/drivers/message/fusion/mptsas.c
47299+++ b/drivers/message/fusion/mptsas.c
47300@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47301 return 0;
47302 }
47303
47304+static inline void
47305+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47306+{
47307+ if (phy_info->port_details) {
47308+ phy_info->port_details->rphy = rphy;
47309+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47310+ ioc->name, rphy));
47311+ }
47312+
47313+ if (rphy) {
47314+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47315+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47316+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47317+ ioc->name, rphy, rphy->dev.release));
47318+ }
47319+}
47320+
47321 /* no mutex */
47322 static void
47323 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47324@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47325 return NULL;
47326 }
47327
47328-static inline void
47329-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47330-{
47331- if (phy_info->port_details) {
47332- phy_info->port_details->rphy = rphy;
47333- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47334- ioc->name, rphy));
47335- }
47336-
47337- if (rphy) {
47338- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47339- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47340- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47341- ioc->name, rphy, rphy->dev.release));
47342- }
47343-}
47344-
47345 static inline struct sas_port *
47346 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47347 {
47348diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
47349index 2a1c6f2..a04c6a2 100644
47350--- a/drivers/message/fusion/mptscsih.c
47351+++ b/drivers/message/fusion/mptscsih.c
47352@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
47353
47354 h = shost_priv(SChost);
47355
47356- if (h) {
47357- if (h->info_kbuf == NULL)
47358- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
47359- return h->info_kbuf;
47360- h->info_kbuf[0] = '\0';
47361+ if (!h)
47362+ return NULL;
47363
47364- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
47365- h->info_kbuf[size-1] = '\0';
47366- }
47367+ if (h->info_kbuf == NULL)
47368+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
47369+ return h->info_kbuf;
47370+ h->info_kbuf[0] = '\0';
47371+
47372+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
47373+ h->info_kbuf[size-1] = '\0';
47374
47375 return h->info_kbuf;
47376 }
47377diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
47378index b7d87cd..3fb36da 100644
47379--- a/drivers/message/i2o/i2o_proc.c
47380+++ b/drivers/message/i2o/i2o_proc.c
47381@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
47382 "Array Controller Device"
47383 };
47384
47385-static char *chtostr(char *tmp, u8 *chars, int n)
47386-{
47387- tmp[0] = 0;
47388- return strncat(tmp, (char *)chars, n);
47389-}
47390-
47391 static int i2o_report_query_status(struct seq_file *seq, int block_status,
47392 char *group)
47393 {
47394@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
47395 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
47396 {
47397 struct i2o_controller *c = (struct i2o_controller *)seq->private;
47398- static u32 work32[5];
47399- static u8 *work8 = (u8 *) work32;
47400- static u16 *work16 = (u16 *) work32;
47401+ u32 work32[5];
47402+ u8 *work8 = (u8 *) work32;
47403+ u16 *work16 = (u16 *) work32;
47404 int token;
47405 u32 hwcap;
47406
47407@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47408 } *result;
47409
47410 i2o_exec_execute_ddm_table ddm_table;
47411- char tmp[28 + 1];
47412
47413 result = kmalloc(sizeof(*result), GFP_KERNEL);
47414 if (!result)
47415@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47416
47417 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
47418 seq_printf(seq, "%-#8x", ddm_table.module_id);
47419- seq_printf(seq, "%-29s",
47420- chtostr(tmp, ddm_table.module_name_version, 28));
47421+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
47422 seq_printf(seq, "%9d ", ddm_table.data_size);
47423 seq_printf(seq, "%8d", ddm_table.code_size);
47424
47425@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47426
47427 i2o_driver_result_table *result;
47428 i2o_driver_store_table *dst;
47429- char tmp[28 + 1];
47430
47431 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
47432 if (result == NULL)
47433@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47434
47435 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
47436 seq_printf(seq, "%-#8x", dst->module_id);
47437- seq_printf(seq, "%-29s",
47438- chtostr(tmp, dst->module_name_version, 28));
47439- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
47440+ seq_printf(seq, "%-.28s", dst->module_name_version);
47441+ seq_printf(seq, "%-.8s", dst->date);
47442 seq_printf(seq, "%8d ", dst->module_size);
47443 seq_printf(seq, "%8d ", dst->mpb_size);
47444 seq_printf(seq, "0x%04x", dst->module_flags);
47445@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
47446 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47447 {
47448 struct i2o_device *d = (struct i2o_device *)seq->private;
47449- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47450+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47451 // == (allow) 512d bytes (max)
47452- static u16 *work16 = (u16 *) work32;
47453+ u16 *work16 = (u16 *) work32;
47454 int token;
47455- char tmp[16 + 1];
47456
47457 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
47458
47459@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47460 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
47461 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
47462 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
47463- seq_printf(seq, "Vendor info : %s\n",
47464- chtostr(tmp, (u8 *) (work32 + 2), 16));
47465- seq_printf(seq, "Product info : %s\n",
47466- chtostr(tmp, (u8 *) (work32 + 6), 16));
47467- seq_printf(seq, "Description : %s\n",
47468- chtostr(tmp, (u8 *) (work32 + 10), 16));
47469- seq_printf(seq, "Product rev. : %s\n",
47470- chtostr(tmp, (u8 *) (work32 + 14), 8));
47471+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
47472+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
47473+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
47474+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
47475
47476 seq_printf(seq, "Serial number : ");
47477 print_serial_number(seq, (u8 *) (work32 + 16),
47478@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47479 u8 pad[256]; // allow up to 256 byte (max) serial number
47480 } result;
47481
47482- char tmp[24 + 1];
47483-
47484 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
47485
47486 if (token < 0) {
47487@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47488 }
47489
47490 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
47491- seq_printf(seq, "Module name : %s\n",
47492- chtostr(tmp, result.module_name, 24));
47493- seq_printf(seq, "Module revision : %s\n",
47494- chtostr(tmp, result.module_rev, 8));
47495+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
47496+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
47497
47498 seq_printf(seq, "Serial number : ");
47499 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
47500@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47501 u8 instance_number[4];
47502 } result;
47503
47504- char tmp[64 + 1];
47505-
47506 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
47507
47508 if (token < 0) {
47509@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47510 return 0;
47511 }
47512
47513- seq_printf(seq, "Device name : %s\n",
47514- chtostr(tmp, result.device_name, 64));
47515- seq_printf(seq, "Service name : %s\n",
47516- chtostr(tmp, result.service_name, 64));
47517- seq_printf(seq, "Physical name : %s\n",
47518- chtostr(tmp, result.physical_location, 64));
47519- seq_printf(seq, "Instance number : %s\n",
47520- chtostr(tmp, result.instance_number, 4));
47521+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
47522+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
47523+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
47524+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
47525
47526 return 0;
47527 }
47528@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47529 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
47530 {
47531 struct i2o_device *d = (struct i2o_device *)seq->private;
47532- static u32 work32[12];
47533- static u16 *work16 = (u16 *) work32;
47534- static u8 *work8 = (u8 *) work32;
47535+ u32 work32[12];
47536+ u16 *work16 = (u16 *) work32;
47537+ u8 *work8 = (u8 *) work32;
47538 int token;
47539
47540 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
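
The i2o_proc hunks above drop the chtostr() bounce buffer in favour of a
printf precision specifier: "%.Ns" reads at most N bytes, so fixed-width
firmware fields that carry no NUL terminator can be printed directly. A
minimal user-space sketch of that pattern (field contents invented):

#include <stdio.h>

int main(void)
{
	/* 8-byte fixed-width field, deliberately not NUL-terminated */
	char module_rev[8] = { 'v', '1', '.', '2', '.', '3', '4', '5' };

	/* the precision bounds the read, so the missing NUL is harmless */
	printf("Module revision : %.8s\n", module_rev);
	return 0;
}
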
47541diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
47542index 92752fb..a7494f6 100644
47543--- a/drivers/message/i2o/iop.c
47544+++ b/drivers/message/i2o/iop.c
47545@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
47546
47547 spin_lock_irqsave(&c->context_list_lock, flags);
47548
47549- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
47550- atomic_inc(&c->context_list_counter);
47551+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
47552+ atomic_inc_unchecked(&c->context_list_counter);
47553
47554- entry->context = atomic_read(&c->context_list_counter);
47555+ entry->context = atomic_read_unchecked(&c->context_list_counter);
47556
47557 list_add(&entry->list, &c->context_list);
47558
47559@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
47560
47561 #if BITS_PER_LONG == 64
47562 spin_lock_init(&c->context_list_lock);
47563- atomic_set(&c->context_list_counter, 0);
47564+ atomic_set_unchecked(&c->context_list_counter, 0);
47565 INIT_LIST_HEAD(&c->context_list);
47566 #endif
47567
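
The iop.c hunks above move the context-list counter to the *_unchecked
atomic API. Under PAX_REFCOUNT the ordinary atomic ops trap on signed
overflow to catch reference-count bugs; counters that may legitimately
wrap, such as this ID allocator, are switched to unchecked variants that
skip the trap. A rough user-space model, not the PaX implementation:

#include <stdio.h>

typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* no overflow trap: wrapping is acceptable for this counter */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_unchecked_t ctx_counter = { 0 };

	atomic_inc_unchecked(&ctx_counter);
	printf("context = %d\n", atomic_read_unchecked(&ctx_counter));
	return 0;
}
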
47568diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47569index d1a22aa..d0f7bf7 100644
47570--- a/drivers/mfd/ab8500-debugfs.c
47571+++ b/drivers/mfd/ab8500-debugfs.c
47572@@ -100,7 +100,7 @@ static int irq_last;
47573 static u32 *irq_count;
47574 static int num_irqs;
47575
47576-static struct device_attribute **dev_attr;
47577+static device_attribute_no_const **dev_attr;
47578 static char **event_name;
47579
47580 static u8 avg_sample = SAMPLE_16;
47581diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47582index a83eed5..62a58a9 100644
47583--- a/drivers/mfd/max8925-i2c.c
47584+++ b/drivers/mfd/max8925-i2c.c
47585@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47586 const struct i2c_device_id *id)
47587 {
47588 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47589- static struct max8925_chip *chip;
47590+ struct max8925_chip *chip;
47591 struct device_node *node = client->dev.of_node;
47592
47593 if (node && !pdata) {
47594diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47595index f9e42ea..614d240 100644
47596--- a/drivers/mfd/tps65910.c
47597+++ b/drivers/mfd/tps65910.c
47598@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47599 struct tps65910_platform_data *pdata)
47600 {
47601 int ret = 0;
47602- static struct regmap_irq_chip *tps6591x_irqs_chip;
47603+ struct regmap_irq_chip *tps6591x_irqs_chip;
47604
47605 if (!irq) {
47606 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47607diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47608index 596b1f6..5b6ab74 100644
47609--- a/drivers/mfd/twl4030-irq.c
47610+++ b/drivers/mfd/twl4030-irq.c
47611@@ -34,6 +34,7 @@
47612 #include <linux/of.h>
47613 #include <linux/irqdomain.h>
47614 #include <linux/i2c/twl.h>
47615+#include <asm/pgtable.h>
47616
47617 #include "twl-core.h"
47618
47619@@ -725,10 +726,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47620 * Install an irq handler for each of the SIH modules;
47621 * clone dummy irq_chip since PIH can't *do* anything
47622 */
47623- twl4030_irq_chip = dummy_irq_chip;
47624- twl4030_irq_chip.name = "twl4030";
47625+ pax_open_kernel();
47626+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47627+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47628
47629- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47630+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47631+ pax_close_kernel();
47632
47633 for (i = irq_base; i < irq_end; i++) {
47634 irq_set_chip_and_handler(i, &twl4030_irq_chip,
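
The twl4030-irq hunk writes to structures that the constify plugin would
otherwise keep in read-only memory, so the one-off writes are bracketed
with pax_open_kernel()/pax_close_kernel(). A user-space analogy using
mprotect(); in the kernel the mechanism differs (e.g. toggling CR0.WP on
x86), and the page size is assumed to be 4 KiB for brevity:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	char *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;
	strcpy(page, "dummy");
	mprotect(page, 4096, PROT_READ);		/* now effectively const */

	mprotect(page, 4096, PROT_READ | PROT_WRITE);	/* pax_open_kernel()  */
	strcpy(page, "twl4030");			/* the one-off write  */
	mprotect(page, 4096, PROT_READ);		/* pax_close_kernel() */

	printf("name = %s\n", page);
	return 0;
}
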
47635diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47636index 464419b..64bae8d 100644
47637--- a/drivers/misc/c2port/core.c
47638+++ b/drivers/misc/c2port/core.c
47639@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47640 goto error_idr_alloc;
47641 c2dev->id = ret;
47642
47643- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47644+ pax_open_kernel();
47645+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47646+ pax_close_kernel();
47647
47648 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47649 "c2port%d", c2dev->id);
47650diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47651index 3f2b625..945e179 100644
47652--- a/drivers/misc/eeprom/sunxi_sid.c
47653+++ b/drivers/misc/eeprom/sunxi_sid.c
47654@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47655
47656 platform_set_drvdata(pdev, sid_data);
47657
47658- sid_bin_attr.size = sid_data->keysize;
47659+ pax_open_kernel();
47660+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47661+ pax_close_kernel();
47662 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47663 return -ENODEV;
47664
47665diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47666index 36f5d52..32311c3 100644
47667--- a/drivers/misc/kgdbts.c
47668+++ b/drivers/misc/kgdbts.c
47669@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47670 char before[BREAK_INSTR_SIZE];
47671 char after[BREAK_INSTR_SIZE];
47672
47673- probe_kernel_read(before, (char *)kgdbts_break_test,
47674+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47675 BREAK_INSTR_SIZE);
47676 init_simple_test();
47677 ts.tst = plant_and_detach_test;
47678@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47679 /* Activate test with initial breakpoint */
47680 if (!is_early)
47681 kgdb_breakpoint();
47682- probe_kernel_read(after, (char *)kgdbts_break_test,
47683+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47684 BREAK_INSTR_SIZE);
47685 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47686 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47687diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47688index 3ef4627..8d00486 100644
47689--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47690+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47691@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47692 * the lid is closed. This leads to interrupts as soon as a little move
47693 * is done.
47694 */
47695- atomic_inc(&lis3->count);
47696+ atomic_inc_unchecked(&lis3->count);
47697
47698 wake_up_interruptible(&lis3->misc_wait);
47699 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47700@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47701 if (lis3->pm_dev)
47702 pm_runtime_get_sync(lis3->pm_dev);
47703
47704- atomic_set(&lis3->count, 0);
47705+ atomic_set_unchecked(&lis3->count, 0);
47706 return 0;
47707 }
47708
47709@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47710 add_wait_queue(&lis3->misc_wait, &wait);
47711 while (true) {
47712 set_current_state(TASK_INTERRUPTIBLE);
47713- data = atomic_xchg(&lis3->count, 0);
47714+ data = atomic_xchg_unchecked(&lis3->count, 0);
47715 if (data)
47716 break;
47717
47718@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47719 struct lis3lv02d, miscdev);
47720
47721 poll_wait(file, &lis3->misc_wait, wait);
47722- if (atomic_read(&lis3->count))
47723+ if (atomic_read_unchecked(&lis3->count))
47724 return POLLIN | POLLRDNORM;
47725 return 0;
47726 }
47727diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47728index c439c82..1f20f57 100644
47729--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47730+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47731@@ -297,7 +297,7 @@ struct lis3lv02d {
47732 struct input_polled_dev *idev; /* input device */
47733 struct platform_device *pdev; /* platform device */
47734 struct regulator_bulk_data regulators[2];
47735- atomic_t count; /* interrupt count after last read */
47736+ atomic_unchecked_t count; /* interrupt count after last read */
47737 union axis_conversion ac; /* hw -> logical axis */
47738 int mapped_btns[3];
47739
47740diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47741index 2f30bad..c4c13d0 100644
47742--- a/drivers/misc/sgi-gru/gruhandles.c
47743+++ b/drivers/misc/sgi-gru/gruhandles.c
47744@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47745 unsigned long nsec;
47746
47747 nsec = CLKS2NSEC(clks);
47748- atomic_long_inc(&mcs_op_statistics[op].count);
47749- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47750+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47751+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47752 if (mcs_op_statistics[op].max < nsec)
47753 mcs_op_statistics[op].max = nsec;
47754 }
47755diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47756index 4f76359..cdfcb2e 100644
47757--- a/drivers/misc/sgi-gru/gruprocfs.c
47758+++ b/drivers/misc/sgi-gru/gruprocfs.c
47759@@ -32,9 +32,9 @@
47760
47761 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47762
47763-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47764+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47765 {
47766- unsigned long val = atomic_long_read(v);
47767+ unsigned long val = atomic_long_read_unchecked(v);
47768
47769 seq_printf(s, "%16lu %s\n", val, id);
47770 }
47771@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47772
47773 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47774 for (op = 0; op < mcsop_last; op++) {
47775- count = atomic_long_read(&mcs_op_statistics[op].count);
47776- total = atomic_long_read(&mcs_op_statistics[op].total);
47777+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47778+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47779 max = mcs_op_statistics[op].max;
47780 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47781 count ? total / count : 0, max);
47782diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47783index 5c3ce24..4915ccb 100644
47784--- a/drivers/misc/sgi-gru/grutables.h
47785+++ b/drivers/misc/sgi-gru/grutables.h
47786@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47787 * GRU statistics.
47788 */
47789 struct gru_stats_s {
47790- atomic_long_t vdata_alloc;
47791- atomic_long_t vdata_free;
47792- atomic_long_t gts_alloc;
47793- atomic_long_t gts_free;
47794- atomic_long_t gms_alloc;
47795- atomic_long_t gms_free;
47796- atomic_long_t gts_double_allocate;
47797- atomic_long_t assign_context;
47798- atomic_long_t assign_context_failed;
47799- atomic_long_t free_context;
47800- atomic_long_t load_user_context;
47801- atomic_long_t load_kernel_context;
47802- atomic_long_t lock_kernel_context;
47803- atomic_long_t unlock_kernel_context;
47804- atomic_long_t steal_user_context;
47805- atomic_long_t steal_kernel_context;
47806- atomic_long_t steal_context_failed;
47807- atomic_long_t nopfn;
47808- atomic_long_t asid_new;
47809- atomic_long_t asid_next;
47810- atomic_long_t asid_wrap;
47811- atomic_long_t asid_reuse;
47812- atomic_long_t intr;
47813- atomic_long_t intr_cbr;
47814- atomic_long_t intr_tfh;
47815- atomic_long_t intr_spurious;
47816- atomic_long_t intr_mm_lock_failed;
47817- atomic_long_t call_os;
47818- atomic_long_t call_os_wait_queue;
47819- atomic_long_t user_flush_tlb;
47820- atomic_long_t user_unload_context;
47821- atomic_long_t user_exception;
47822- atomic_long_t set_context_option;
47823- atomic_long_t check_context_retarget_intr;
47824- atomic_long_t check_context_unload;
47825- atomic_long_t tlb_dropin;
47826- atomic_long_t tlb_preload_page;
47827- atomic_long_t tlb_dropin_fail_no_asid;
47828- atomic_long_t tlb_dropin_fail_upm;
47829- atomic_long_t tlb_dropin_fail_invalid;
47830- atomic_long_t tlb_dropin_fail_range_active;
47831- atomic_long_t tlb_dropin_fail_idle;
47832- atomic_long_t tlb_dropin_fail_fmm;
47833- atomic_long_t tlb_dropin_fail_no_exception;
47834- atomic_long_t tfh_stale_on_fault;
47835- atomic_long_t mmu_invalidate_range;
47836- atomic_long_t mmu_invalidate_page;
47837- atomic_long_t flush_tlb;
47838- atomic_long_t flush_tlb_gru;
47839- atomic_long_t flush_tlb_gru_tgh;
47840- atomic_long_t flush_tlb_gru_zero_asid;
47841+ atomic_long_unchecked_t vdata_alloc;
47842+ atomic_long_unchecked_t vdata_free;
47843+ atomic_long_unchecked_t gts_alloc;
47844+ atomic_long_unchecked_t gts_free;
47845+ atomic_long_unchecked_t gms_alloc;
47846+ atomic_long_unchecked_t gms_free;
47847+ atomic_long_unchecked_t gts_double_allocate;
47848+ atomic_long_unchecked_t assign_context;
47849+ atomic_long_unchecked_t assign_context_failed;
47850+ atomic_long_unchecked_t free_context;
47851+ atomic_long_unchecked_t load_user_context;
47852+ atomic_long_unchecked_t load_kernel_context;
47853+ atomic_long_unchecked_t lock_kernel_context;
47854+ atomic_long_unchecked_t unlock_kernel_context;
47855+ atomic_long_unchecked_t steal_user_context;
47856+ atomic_long_unchecked_t steal_kernel_context;
47857+ atomic_long_unchecked_t steal_context_failed;
47858+ atomic_long_unchecked_t nopfn;
47859+ atomic_long_unchecked_t asid_new;
47860+ atomic_long_unchecked_t asid_next;
47861+ atomic_long_unchecked_t asid_wrap;
47862+ atomic_long_unchecked_t asid_reuse;
47863+ atomic_long_unchecked_t intr;
47864+ atomic_long_unchecked_t intr_cbr;
47865+ atomic_long_unchecked_t intr_tfh;
47866+ atomic_long_unchecked_t intr_spurious;
47867+ atomic_long_unchecked_t intr_mm_lock_failed;
47868+ atomic_long_unchecked_t call_os;
47869+ atomic_long_unchecked_t call_os_wait_queue;
47870+ atomic_long_unchecked_t user_flush_tlb;
47871+ atomic_long_unchecked_t user_unload_context;
47872+ atomic_long_unchecked_t user_exception;
47873+ atomic_long_unchecked_t set_context_option;
47874+ atomic_long_unchecked_t check_context_retarget_intr;
47875+ atomic_long_unchecked_t check_context_unload;
47876+ atomic_long_unchecked_t tlb_dropin;
47877+ atomic_long_unchecked_t tlb_preload_page;
47878+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47879+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47880+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47881+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47882+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47883+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47884+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47885+ atomic_long_unchecked_t tfh_stale_on_fault;
47886+ atomic_long_unchecked_t mmu_invalidate_range;
47887+ atomic_long_unchecked_t mmu_invalidate_page;
47888+ atomic_long_unchecked_t flush_tlb;
47889+ atomic_long_unchecked_t flush_tlb_gru;
47890+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47891+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47892
47893- atomic_long_t copy_gpa;
47894- atomic_long_t read_gpa;
47895+ atomic_long_unchecked_t copy_gpa;
47896+ atomic_long_unchecked_t read_gpa;
47897
47898- atomic_long_t mesq_receive;
47899- atomic_long_t mesq_receive_none;
47900- atomic_long_t mesq_send;
47901- atomic_long_t mesq_send_failed;
47902- atomic_long_t mesq_noop;
47903- atomic_long_t mesq_send_unexpected_error;
47904- atomic_long_t mesq_send_lb_overflow;
47905- atomic_long_t mesq_send_qlimit_reached;
47906- atomic_long_t mesq_send_amo_nacked;
47907- atomic_long_t mesq_send_put_nacked;
47908- atomic_long_t mesq_page_overflow;
47909- atomic_long_t mesq_qf_locked;
47910- atomic_long_t mesq_qf_noop_not_full;
47911- atomic_long_t mesq_qf_switch_head_failed;
47912- atomic_long_t mesq_qf_unexpected_error;
47913- atomic_long_t mesq_noop_unexpected_error;
47914- atomic_long_t mesq_noop_lb_overflow;
47915- atomic_long_t mesq_noop_qlimit_reached;
47916- atomic_long_t mesq_noop_amo_nacked;
47917- atomic_long_t mesq_noop_put_nacked;
47918- atomic_long_t mesq_noop_page_overflow;
47919+ atomic_long_unchecked_t mesq_receive;
47920+ atomic_long_unchecked_t mesq_receive_none;
47921+ atomic_long_unchecked_t mesq_send;
47922+ atomic_long_unchecked_t mesq_send_failed;
47923+ atomic_long_unchecked_t mesq_noop;
47924+ atomic_long_unchecked_t mesq_send_unexpected_error;
47925+ atomic_long_unchecked_t mesq_send_lb_overflow;
47926+ atomic_long_unchecked_t mesq_send_qlimit_reached;
47927+ atomic_long_unchecked_t mesq_send_amo_nacked;
47928+ atomic_long_unchecked_t mesq_send_put_nacked;
47929+ atomic_long_unchecked_t mesq_page_overflow;
47930+ atomic_long_unchecked_t mesq_qf_locked;
47931+ atomic_long_unchecked_t mesq_qf_noop_not_full;
47932+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
47933+ atomic_long_unchecked_t mesq_qf_unexpected_error;
47934+ atomic_long_unchecked_t mesq_noop_unexpected_error;
47935+ atomic_long_unchecked_t mesq_noop_lb_overflow;
47936+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
47937+ atomic_long_unchecked_t mesq_noop_amo_nacked;
47938+ atomic_long_unchecked_t mesq_noop_put_nacked;
47939+ atomic_long_unchecked_t mesq_noop_page_overflow;
47940
47941 };
47942
47943@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47944 tghop_invalidate, mcsop_last};
47945
47946 struct mcs_op_statistic {
47947- atomic_long_t count;
47948- atomic_long_t total;
47949+ atomic_long_unchecked_t count;
47950+ atomic_long_unchecked_t total;
47951 unsigned long max;
47952 };
47953
47954@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47955
47956 #define STAT(id) do { \
47957 if (gru_options & OPT_STATS) \
47958- atomic_long_inc(&gru_stats.id); \
47959+ atomic_long_inc_unchecked(&gru_stats.id); \
47960 } while (0)
47961
47962 #ifdef CONFIG_SGI_GRU_DEBUG
47963diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47964index c862cd4..0d176fe 100644
47965--- a/drivers/misc/sgi-xp/xp.h
47966+++ b/drivers/misc/sgi-xp/xp.h
47967@@ -288,7 +288,7 @@ struct xpc_interface {
47968 xpc_notify_func, void *);
47969 void (*received) (short, int, void *);
47970 enum xp_retval (*partid_to_nasids) (short, void *);
47971-};
47972+} __no_const;
47973
47974 extern struct xpc_interface xpc_interface;
47975
47976diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47977index 01be66d..e3a0c7e 100644
47978--- a/drivers/misc/sgi-xp/xp_main.c
47979+++ b/drivers/misc/sgi-xp/xp_main.c
47980@@ -78,13 +78,13 @@ xpc_notloaded(void)
47981 }
47982
47983 struct xpc_interface xpc_interface = {
47984- (void (*)(int))xpc_notloaded,
47985- (void (*)(int))xpc_notloaded,
47986- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47987- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47988+ .connect = (void (*)(int))xpc_notloaded,
47989+ .disconnect = (void (*)(int))xpc_notloaded,
47990+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47991+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47992 void *))xpc_notloaded,
47993- (void (*)(short, int, void *))xpc_notloaded,
47994- (enum xp_retval(*)(short, void *))xpc_notloaded
47995+ .received = (void (*)(short, int, void *))xpc_notloaded,
47996+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47997 };
47998 EXPORT_SYMBOL_GPL(xpc_interface);
47999
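
The xp_main.c hunk converts xpc_interface from positional to designated
initializers, which bind each value to a named member and stay correct if
the structure layout later changes. A self-contained example with
invented names:

#include <stdio.h>

struct ops {
	void (*connect)(int);
	void (*disconnect)(int);
};

static void not_loaded(int ch)
{
	printf("channel %d: module not loaded\n", ch);
}

static struct ops example_ops = {
	.connect    = not_loaded,	/* survives member reordering */
	.disconnect = not_loaded,
};

int main(void)
{
	example_ops.connect(3);
	return 0;
}
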
48000diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
48001index b94d5f7..7f494c5 100644
48002--- a/drivers/misc/sgi-xp/xpc.h
48003+++ b/drivers/misc/sgi-xp/xpc.h
48004@@ -835,6 +835,7 @@ struct xpc_arch_operations {
48005 void (*received_payload) (struct xpc_channel *, void *);
48006 void (*notify_senders_of_disconnect) (struct xpc_channel *);
48007 };
48008+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
48009
48010 /* struct xpc_partition act_state values (for XPC HB) */
48011
48012@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
48013 /* found in xpc_main.c */
48014 extern struct device *xpc_part;
48015 extern struct device *xpc_chan;
48016-extern struct xpc_arch_operations xpc_arch_ops;
48017+extern xpc_arch_operations_no_const xpc_arch_ops;
48018 extern int xpc_disengage_timelimit;
48019 extern int xpc_disengage_timedout;
48020 extern int xpc_activate_IRQ_rcvd;
48021diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
48022index 82dc574..8539ab2 100644
48023--- a/drivers/misc/sgi-xp/xpc_main.c
48024+++ b/drivers/misc/sgi-xp/xpc_main.c
48025@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
48026 .notifier_call = xpc_system_die,
48027 };
48028
48029-struct xpc_arch_operations xpc_arch_ops;
48030+xpc_arch_operations_no_const xpc_arch_ops;
48031
48032 /*
48033 * Timer function to enforce the timelimit on the partition disengage.
48034@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
48035
48036 if (((die_args->trapnr == X86_TRAP_MF) ||
48037 (die_args->trapnr == X86_TRAP_XF)) &&
48038- !user_mode_vm(die_args->regs))
48039+ !user_mode(die_args->regs))
48040 xpc_die_deactivate();
48041
48042 break;
48043diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
48044index 452782b..0c10e40 100644
48045--- a/drivers/mmc/card/block.c
48046+++ b/drivers/mmc/card/block.c
48047@@ -574,7 +574,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
48048 if (idata->ic.postsleep_min_us)
48049 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
48050
48051- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
48052+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
48053 err = -EFAULT;
48054 goto cmd_rel_host;
48055 }
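
The block.c hunk replaces &(ic_ptr->response) with ic_ptr->response. Both
evaluate to the same address, but the former has type "pointer to array"
while the latter decays to "pointer to first element", and the type-aware
size checks this patch builds with care about the difference. An
illustration with an invented struct:

#include <stdio.h>

struct ioc {
	unsigned int response[4];
};

int main(void)
{
	struct ioc ic;
	unsigned int *p = ic.response;		/* element pointer     */
	unsigned int (*pa)[4] = &ic.response;	/* whole-array pointer */

	printf("same address: %d\n", (void *)p == (void *)pa);
	printf("sizeof *p=%zu, sizeof *pa=%zu\n", sizeof(*p), sizeof(*pa));
	return 0;
}
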
48056diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
48057index f51b5ba..86614a7 100644
48058--- a/drivers/mmc/core/mmc_ops.c
48059+++ b/drivers/mmc/core/mmc_ops.c
48060@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
48061 void *data_buf;
48062 int is_on_stack;
48063
48064- is_on_stack = object_is_on_stack(buf);
48065+ is_on_stack = object_starts_on_stack(buf);
48066 if (is_on_stack) {
48067 /*
48068 * dma onto stack is unsafe/nonportable, but callers to this
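
The mmc_ops.c hunk renames the stack test to object_starts_on_stack(),
making explicit that only the object's starting address is checked; the
buffer may still extend past the stack. A simplified model of that check,
with a static array standing in for the task stack:

#include <stdio.h>

static int object_starts_on_stack(const void *obj,
				  const char *stack, unsigned long size)
{
	const char *p = obj;

	return p >= stack && p < stack + size;
}

int main(void)
{
	static char fake_stack[8192];	/* stand-in for the task stack */

	printf("%d\n", object_starts_on_stack(fake_stack + 16,
					      fake_stack, sizeof(fake_stack)));
	printf("%d\n", object_starts_on_stack(fake_stack + sizeof(fake_stack),
					      fake_stack, sizeof(fake_stack)));
	return 0;
}
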
48069diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
48070index 738fa24..1568451 100644
48071--- a/drivers/mmc/host/dw_mmc.h
48072+++ b/drivers/mmc/host/dw_mmc.h
48073@@ -257,5 +257,5 @@ struct dw_mci_drv_data {
48074 int (*parse_dt)(struct dw_mci *host);
48075 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
48076 struct dw_mci_tuning_data *tuning_data);
48077-};
48078+} __do_const;
48079 #endif /* _DW_MMC_H_ */
48080diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
48081index 7ad463e..2aa0079 100644
48082--- a/drivers/mmc/host/mmci.c
48083+++ b/drivers/mmc/host/mmci.c
48084@@ -1506,7 +1506,9 @@ static int mmci_probe(struct amba_device *dev,
48085 mmc->caps |= MMC_CAP_CMD23;
48086
48087 if (variant->busy_detect) {
48088- mmci_ops.card_busy = mmci_card_busy;
48089+ pax_open_kernel();
48090+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
48091+ pax_close_kernel();
48092 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
48093 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
48094 mmc->max_busy_timeout = 0;
48095diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
48096index ccec0e3..199f9ce 100644
48097--- a/drivers/mmc/host/sdhci-esdhc-imx.c
48098+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
48099@@ -1034,9 +1034,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
48100 host->mmc->caps |= MMC_CAP_1_8V_DDR;
48101 }
48102
48103- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
48104- sdhci_esdhc_ops.platform_execute_tuning =
48105+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
48106+ pax_open_kernel();
48107+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
48108 esdhc_executing_tuning;
48109+ pax_close_kernel();
48110+ }
48111
48112 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
48113 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
48114diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
48115index fa5954a..56840e5 100644
48116--- a/drivers/mmc/host/sdhci-s3c.c
48117+++ b/drivers/mmc/host/sdhci-s3c.c
48118@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
48119 * we can use overriding functions instead of default.
48120 */
48121 if (sc->no_divider) {
48122- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48123- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48124- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48125+ pax_open_kernel();
48126+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48127+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48128+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48129+ pax_close_kernel();
48130 }
48131
48132 /* It supports additional host capabilities if needed */
48133diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
48134index 423666b..81ff5eb 100644
48135--- a/drivers/mtd/chips/cfi_cmdset_0020.c
48136+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
48137@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
48138 size_t totlen = 0, thislen;
48139 int ret = 0;
48140 size_t buflen = 0;
48141- static char *buffer;
48142+ char *buffer;
48143
48144 if (!ECCBUF_SIZE) {
48145 /* We should fall back to a general writev implementation.
48146diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
48147index 9f2012a..a81c720 100644
48148--- a/drivers/mtd/nand/denali.c
48149+++ b/drivers/mtd/nand/denali.c
48150@@ -24,6 +24,7 @@
48151 #include <linux/slab.h>
48152 #include <linux/mtd/mtd.h>
48153 #include <linux/module.h>
48154+#include <linux/slab.h>
48155
48156 #include "denali.h"
48157
48158diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48159index f638cd8..2cbf586 100644
48160--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48161+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48162@@ -387,7 +387,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48163
48164 /* first try to map the upper buffer directly */
48165 if (virt_addr_valid(this->upper_buf) &&
48166- !object_is_on_stack(this->upper_buf)) {
48167+ !object_starts_on_stack(this->upper_buf)) {
48168 sg_init_one(sgl, this->upper_buf, this->upper_len);
48169 ret = dma_map_sg(this->dev, sgl, 1, dr);
48170 if (ret == 0)
48171diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48172index 51b9d6a..52af9a7 100644
48173--- a/drivers/mtd/nftlmount.c
48174+++ b/drivers/mtd/nftlmount.c
48175@@ -24,6 +24,7 @@
48176 #include <asm/errno.h>
48177 #include <linux/delay.h>
48178 #include <linux/slab.h>
48179+#include <linux/sched.h>
48180 #include <linux/mtd/mtd.h>
48181 #include <linux/mtd/nand.h>
48182 #include <linux/mtd/nftl.h>
48183diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48184index cf49c22..971b133 100644
48185--- a/drivers/mtd/sm_ftl.c
48186+++ b/drivers/mtd/sm_ftl.c
48187@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48188 #define SM_CIS_VENDOR_OFFSET 0x59
48189 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48190 {
48191- struct attribute_group *attr_group;
48192+ attribute_group_no_const *attr_group;
48193 struct attribute **attributes;
48194 struct sm_sysfs_attribute *vendor_attribute;
48195 char *vendor;
48196diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48197index 5ab3c18..5c3a836 100644
48198--- a/drivers/net/bonding/bond_netlink.c
48199+++ b/drivers/net/bonding/bond_netlink.c
48200@@ -542,7 +542,7 @@ nla_put_failure:
48201 return -EMSGSIZE;
48202 }
48203
48204-struct rtnl_link_ops bond_link_ops __read_mostly = {
48205+struct rtnl_link_ops bond_link_ops = {
48206 .kind = "bond",
48207 .priv_size = sizeof(struct bonding),
48208 .setup = bond_setup,
48209diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48210index 4168822..f38eeddf 100644
48211--- a/drivers/net/can/Kconfig
48212+++ b/drivers/net/can/Kconfig
48213@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48214
48215 config CAN_FLEXCAN
48216 tristate "Support for Freescale FLEXCAN based chips"
48217- depends on ARM || PPC
48218+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48219 ---help---
48220 Say Y here if you want to support for Freescale FlexCAN.
48221
48222diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48223index 1d162cc..b546a75 100644
48224--- a/drivers/net/ethernet/8390/ax88796.c
48225+++ b/drivers/net/ethernet/8390/ax88796.c
48226@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48227 if (ax->plat->reg_offsets)
48228 ei_local->reg_offset = ax->plat->reg_offsets;
48229 else {
48230+ resource_size_t _mem_size = mem_size;
48231+ do_div(_mem_size, 0x18);
48232 ei_local->reg_offset = ax->reg_offsets;
48233 for (ret = 0; ret < 0x18; ret++)
48234- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48235+ ax->reg_offsets[ret] = _mem_size * ret;
48236 }
48237
48238 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
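
The ax88796.c hunk replaces a plain division of resource_size_t (which
may be 64-bit) with do_div(), since 32-bit kernels do not link the libgcc
64-bit division helpers that "/" would otherwise pull in. do_div(n, base)
divides the 64-bit lvalue n in place and returns the remainder; a
plain-C model of its semantics (the in-kernel version uses optimized
assembly on 32-bit):

#include <stdio.h>
#include <stdint.h>

#define do_div(n, base) ({			\
	uint32_t __rem = (n) % (base);		\
	(n) /= (base);				\
	__rem;					\
})

int main(void)
{
	uint64_t mem_size = 0x1218;	/* invented register-window size */
	uint32_t rem = do_div(mem_size, 0x18);

	printf("stride = %llu, remainder = %u\n",
	       (unsigned long long)mem_size, rem);
	return 0;
}
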
48239diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48240index 7330681..7e9e463 100644
48241--- a/drivers/net/ethernet/altera/altera_tse_main.c
48242+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48243@@ -1182,7 +1182,7 @@ static int tse_shutdown(struct net_device *dev)
48244 return 0;
48245 }
48246
48247-static struct net_device_ops altera_tse_netdev_ops = {
48248+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48249 .ndo_open = tse_open,
48250 .ndo_stop = tse_shutdown,
48251 .ndo_start_xmit = tse_start_xmit,
48252@@ -1439,11 +1439,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48253 ndev->netdev_ops = &altera_tse_netdev_ops;
48254 altera_tse_set_ethtool_ops(ndev);
48255
48256+ pax_open_kernel();
48257 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48258
48259 if (priv->hash_filter)
48260 altera_tse_netdev_ops.ndo_set_rx_mode =
48261 tse_set_rx_mode_hashfilter;
48262+ pax_close_kernel();
48263
48264 /* Scatter/gather IO is not supported,
48265 * so it is turned off
48266diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48267index bf462ee8..18b8375 100644
48268--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48269+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48270@@ -986,14 +986,14 @@ do { \
48271 * operations, everything works on mask values.
48272 */
48273 #define XMDIO_READ(_pdata, _mmd, _reg) \
48274- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48275+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48276 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48277
48278 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48279 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48280
48281 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48282- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48283+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48284 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48285
48286 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48287diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48288index 6bb76d5..ded47a8 100644
48289--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48290+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48291@@ -273,7 +273,7 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
48292 struct xgbe_prv_data *pdata = filp->private_data;
48293 unsigned int value;
48294
48295- value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48296+ value = pdata->hw_if->read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48297 pdata->debugfs_xpcs_reg);
48298
48299 return xgbe_common_read(buffer, count, ppos, value);
48300@@ -291,7 +291,7 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
48301 if (len < 0)
48302 return len;
48303
48304- pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48305+ pdata->hw_if->write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48306 pdata->debugfs_xpcs_reg, value);
48307
48308 return len;
48309diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48310index 6f1c859..e96ac1a 100644
48311--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48312+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48313@@ -236,7 +236,7 @@ err_ring:
48314
48315 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48316 {
48317- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48318+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48319 struct xgbe_channel *channel;
48320 struct xgbe_ring *ring;
48321 struct xgbe_ring_data *rdata;
48322@@ -277,7 +277,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48323
48324 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48325 {
48326- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48327+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48328 struct xgbe_channel *channel;
48329 struct xgbe_ring *ring;
48330 struct xgbe_ring_desc *rdesc;
48331@@ -496,7 +496,7 @@ err_out:
48332 static void xgbe_realloc_skb(struct xgbe_channel *channel)
48333 {
48334 struct xgbe_prv_data *pdata = channel->pdata;
48335- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48336+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48337 struct xgbe_ring *ring = channel->rx_ring;
48338 struct xgbe_ring_data *rdata;
48339 struct sk_buff *skb = NULL;
48340@@ -540,17 +540,12 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
48341 DBGPR("<--xgbe_realloc_skb\n");
48342 }
48343
48344-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48345-{
48346- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48347-
48348- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48349- desc_if->free_ring_resources = xgbe_free_ring_resources;
48350- desc_if->map_tx_skb = xgbe_map_tx_skb;
48351- desc_if->realloc_skb = xgbe_realloc_skb;
48352- desc_if->unmap_skb = xgbe_unmap_skb;
48353- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48354- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48355-
48356- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48357-}
48358+const struct xgbe_desc_if default_xgbe_desc_if = {
48359+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48360+ .free_ring_resources = xgbe_free_ring_resources,
48361+ .map_tx_skb = xgbe_map_tx_skb,
48362+ .realloc_skb = xgbe_realloc_skb,
48363+ .unmap_skb = xgbe_unmap_skb,
48364+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48365+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48366+};
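
The xgbe-desc.c hunk replaces the runtime xgbe_init_function_ptrs_desc()
fill-in with a const structure defined at build time, so the function
pointers can live in read-only memory. A minimal sketch of the shape of
that conversion, simplified from the hunk above:

#include <stdio.h>

struct desc_if {
	int  (*alloc_ring_resources)(void);
	void (*free_ring_resources)(void);
};

static int ring_alloc(void) { puts("alloc"); return 0; }
static void ring_free(void) { puts("free"); }

static const struct desc_if default_desc_if = {
	.alloc_ring_resources = ring_alloc,
	.free_ring_resources  = ring_free,
};

int main(void)
{
	/* consumers hold a const pointer, as pdata->desc_if does above */
	const struct desc_if *ops = &default_desc_if;

	ops->alloc_ring_resources();
	ops->free_ring_resources();
	return 0;
}
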
48367diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48368index 002293b..5ced1dd 100644
48369--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48370+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48371@@ -2030,7 +2030,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48372
48373 static int xgbe_init(struct xgbe_prv_data *pdata)
48374 {
48375- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48376+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48377 int ret;
48378
48379 DBGPR("-->xgbe_init\n");
48380@@ -2096,87 +2096,82 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48381 return 0;
48382 }
48383
48384-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48385-{
48386- DBGPR("-->xgbe_init_function_ptrs\n");
48387-
48388- hw_if->tx_complete = xgbe_tx_complete;
48389-
48390- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48391- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48392- hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
48393- hw_if->set_mac_address = xgbe_set_mac_address;
48394-
48395- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48396- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48397-
48398- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48399- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48400-
48401- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48402- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48403-
48404- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48405- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48406- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48407-
48408- hw_if->enable_tx = xgbe_enable_tx;
48409- hw_if->disable_tx = xgbe_disable_tx;
48410- hw_if->enable_rx = xgbe_enable_rx;
48411- hw_if->disable_rx = xgbe_disable_rx;
48412-
48413- hw_if->powerup_tx = xgbe_powerup_tx;
48414- hw_if->powerdown_tx = xgbe_powerdown_tx;
48415- hw_if->powerup_rx = xgbe_powerup_rx;
48416- hw_if->powerdown_rx = xgbe_powerdown_rx;
48417-
48418- hw_if->pre_xmit = xgbe_pre_xmit;
48419- hw_if->dev_read = xgbe_dev_read;
48420- hw_if->enable_int = xgbe_enable_int;
48421- hw_if->disable_int = xgbe_disable_int;
48422- hw_if->init = xgbe_init;
48423- hw_if->exit = xgbe_exit;
48424+const struct xgbe_hw_if default_xgbe_hw_if = {
48425+ .tx_complete = xgbe_tx_complete,
48426+
48427+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48428+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48429+ .set_addn_mac_addrs = xgbe_set_addn_mac_addrs,
48430+ .set_mac_address = xgbe_set_mac_address,
48431+
48432+ .enable_rx_csum = xgbe_enable_rx_csum,
48433+ .disable_rx_csum = xgbe_disable_rx_csum,
48434+
48435+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48436+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48437+
48438+ .read_mmd_regs = xgbe_read_mmd_regs,
48439+ .write_mmd_regs = xgbe_write_mmd_regs,
48440+
48441+ .set_gmii_speed = xgbe_set_gmii_speed,
48442+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48443+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48444+
48445+ .enable_tx = xgbe_enable_tx,
48446+ .disable_tx = xgbe_disable_tx,
48447+ .enable_rx = xgbe_enable_rx,
48448+ .disable_rx = xgbe_disable_rx,
48449+
48450+ .powerup_tx = xgbe_powerup_tx,
48451+ .powerdown_tx = xgbe_powerdown_tx,
48452+ .powerup_rx = xgbe_powerup_rx,
48453+ .powerdown_rx = xgbe_powerdown_rx,
48454+
48455+ .pre_xmit = xgbe_pre_xmit,
48456+ .dev_read = xgbe_dev_read,
48457+ .enable_int = xgbe_enable_int,
48458+ .disable_int = xgbe_disable_int,
48459+ .init = xgbe_init,
48460+ .exit = xgbe_exit,
48461
48462 /* Descriptor related Sequences have to be initialized here */
48463- hw_if->tx_desc_init = xgbe_tx_desc_init;
48464- hw_if->rx_desc_init = xgbe_rx_desc_init;
48465- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48466- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48467- hw_if->is_last_desc = xgbe_is_last_desc;
48468- hw_if->is_context_desc = xgbe_is_context_desc;
48469+ .tx_desc_init = xgbe_tx_desc_init,
48470+ .rx_desc_init = xgbe_rx_desc_init,
48471+ .tx_desc_reset = xgbe_tx_desc_reset,
48472+ .rx_desc_reset = xgbe_rx_desc_reset,
48473+ .is_last_desc = xgbe_is_last_desc,
48474+ .is_context_desc = xgbe_is_context_desc,
48475
48476 /* For FLOW ctrl */
48477- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48478- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48479+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48480+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48481
48482 /* For RX coalescing */
48483- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48484- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48485- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48486- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48487+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48488+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48489+ .usec_to_riwt = xgbe_usec_to_riwt,
48490+ .riwt_to_usec = xgbe_riwt_to_usec,
48491
48492 /* For RX and TX threshold config */
48493- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48494- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48495+ .config_rx_threshold = xgbe_config_rx_threshold,
48496+ .config_tx_threshold = xgbe_config_tx_threshold,
48497
48498 /* For RX and TX Store and Forward Mode config */
48499- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48500- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48501+ .config_rsf_mode = xgbe_config_rsf_mode,
48502+ .config_tsf_mode = xgbe_config_tsf_mode,
48503
48504 /* For TX DMA Operating on Second Frame config */
48505- hw_if->config_osp_mode = xgbe_config_osp_mode;
48506+ .config_osp_mode = xgbe_config_osp_mode,
48507
48508 /* For RX and TX PBL config */
48509- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48510- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48511- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48512- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48513- hw_if->config_pblx8 = xgbe_config_pblx8;
48514+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48515+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48516+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48517+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48518+ .config_pblx8 = xgbe_config_pblx8,
48519
48520 /* For MMC statistics support */
48521- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48522- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48523- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48524-
48525- DBGPR("<--xgbe_init_function_ptrs\n");
48526-}
48527+ .tx_mmc_int = xgbe_tx_mmc_int,
48528+ .rx_mmc_int = xgbe_rx_mmc_int,
48529+ .read_mmc_stats = xgbe_read_mmc_stats,
48530+};
48531diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48532index cfe3d93..07a78ae 100644
48533--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48534+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48535@@ -153,7 +153,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48536
48537 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48538 {
48539- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48540+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48541 struct xgbe_channel *channel;
48542 unsigned int i;
48543
48544@@ -170,7 +170,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48545
48546 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48547 {
48548- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48549+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48550 struct xgbe_channel *channel;
48551 unsigned int i;
48552
48553@@ -188,7 +188,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48554 static irqreturn_t xgbe_isr(int irq, void *data)
48555 {
48556 struct xgbe_prv_data *pdata = data;
48557- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48558+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48559 struct xgbe_channel *channel;
48560 unsigned int dma_isr, dma_ch_isr;
48561 unsigned int mac_isr;
48562@@ -403,7 +403,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
48563
48564 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48565 {
48566- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48567+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48568
48569 DBGPR("-->xgbe_init_tx_coalesce\n");
48570
48571@@ -417,7 +417,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48572
48573 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48574 {
48575- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48576+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48577
48578 DBGPR("-->xgbe_init_rx_coalesce\n");
48579
48580@@ -431,7 +431,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48581
48582 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48583 {
48584- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48585+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48586 struct xgbe_channel *channel;
48587 struct xgbe_ring *ring;
48588 struct xgbe_ring_data *rdata;
48589@@ -456,7 +456,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48590
48591 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48592 {
48593- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48594+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48595 struct xgbe_channel *channel;
48596 struct xgbe_ring *ring;
48597 struct xgbe_ring_data *rdata;
48598@@ -482,7 +482,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48599 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48600 {
48601 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48602- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48603+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48604 unsigned long flags;
48605
48606 DBGPR("-->xgbe_powerdown\n");
48607@@ -520,7 +520,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48608 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48609 {
48610 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48611- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48612+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48613 unsigned long flags;
48614
48615 DBGPR("-->xgbe_powerup\n");
48616@@ -557,7 +557,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48617
48618 static int xgbe_start(struct xgbe_prv_data *pdata)
48619 {
48620- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48621+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48622 struct net_device *netdev = pdata->netdev;
48623
48624 DBGPR("-->xgbe_start\n");
48625@@ -583,7 +583,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
48626
48627 static void xgbe_stop(struct xgbe_prv_data *pdata)
48628 {
48629- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48630+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48631 struct net_device *netdev = pdata->netdev;
48632
48633 DBGPR("-->xgbe_stop\n");
48634@@ -603,7 +603,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
48635
48636 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
48637 {
48638- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48639+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48640
48641 DBGPR("-->xgbe_restart_dev\n");
48642
48643@@ -741,8 +741,8 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
48644 static int xgbe_open(struct net_device *netdev)
48645 {
48646 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48647- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48648- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48649+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48650+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48651 int ret;
48652
48653 DBGPR("-->xgbe_open\n");
48654@@ -804,8 +804,8 @@ err_clk:
48655 static int xgbe_close(struct net_device *netdev)
48656 {
48657 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48658- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48659- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48660+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48661+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48662
48663 DBGPR("-->xgbe_close\n");
48664
48665@@ -835,8 +835,8 @@ static int xgbe_close(struct net_device *netdev)
48666 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48667 {
48668 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48669- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48670- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48671+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48672+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48673 struct xgbe_channel *channel;
48674 struct xgbe_ring *ring;
48675 struct xgbe_packet_data *packet;
48676@@ -903,7 +903,7 @@ tx_netdev_return:
48677 static void xgbe_set_rx_mode(struct net_device *netdev)
48678 {
48679 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48680- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48681+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48682 unsigned int pr_mode, am_mode;
48683
48684 DBGPR("-->xgbe_set_rx_mode\n");
48685@@ -930,7 +930,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48686 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48687 {
48688 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48689- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48690+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48691 struct sockaddr *saddr = addr;
48692
48693 DBGPR("-->xgbe_set_mac_address\n");
48694@@ -976,7 +976,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48695
48696 DBGPR("-->%s\n", __func__);
48697
48698- pdata->hw_if.read_mmc_stats(pdata);
48699+ pdata->hw_if->read_mmc_stats(pdata);
48700
48701 s->rx_packets = pstats->rxframecount_gb;
48702 s->rx_bytes = pstats->rxoctetcount_gb;
48703@@ -1020,7 +1020,7 @@ static int xgbe_set_features(struct net_device *netdev,
48704 netdev_features_t features)
48705 {
48706 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48707- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48708+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48709 unsigned int rxcsum_enabled, rxvlan_enabled;
48710
48711 rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
48712@@ -1072,8 +1072,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48713 static int xgbe_tx_poll(struct xgbe_channel *channel)
48714 {
48715 struct xgbe_prv_data *pdata = channel->pdata;
48716- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48717- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48718+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48719+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48720 struct xgbe_ring *ring = channel->tx_ring;
48721 struct xgbe_ring_data *rdata;
48722 struct xgbe_ring_desc *rdesc;
48723@@ -1124,8 +1124,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48724 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48725 {
48726 struct xgbe_prv_data *pdata = channel->pdata;
48727- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48728- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48729+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48730+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48731 struct xgbe_ring *ring = channel->rx_ring;
48732 struct xgbe_ring_data *rdata;
48733 struct xgbe_packet_data *packet;
48734diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48735index 8909f2b..719e767 100644
48736--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48737+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48738@@ -202,7 +202,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48739
48740 DBGPR("-->%s\n", __func__);
48741
48742- pdata->hw_if.read_mmc_stats(pdata);
48743+ pdata->hw_if->read_mmc_stats(pdata);
48744 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48745 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48746 *data++ = *(u64 *)stat;
48747@@ -387,7 +387,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48748 struct ethtool_coalesce *ec)
48749 {
48750 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48751- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48752+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48753 unsigned int riwt;
48754
48755 DBGPR("-->xgbe_get_coalesce\n");
48756@@ -410,7 +410,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48757 struct ethtool_coalesce *ec)
48758 {
48759 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48760- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48761+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48762 unsigned int rx_frames, rx_riwt, rx_usecs;
48763 unsigned int tx_frames, tx_usecs;
48764
48765diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48766index 5a1891f..1b7888e 100644
48767--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48768+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48769@@ -210,12 +210,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48770 DBGPR("<--xgbe_default_config\n");
48771 }
48772
48773-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48774-{
48775- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48776- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48777-}
48778-
48779 static int xgbe_probe(struct platform_device *pdev)
48780 {
48781 struct xgbe_prv_data *pdata;
48782@@ -306,9 +300,8 @@ static int xgbe_probe(struct platform_device *pdev)
48783 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
48784
48785 /* Set all the function pointers */
48786- xgbe_init_all_fptrs(pdata);
48787- hw_if = &pdata->hw_if;
48788- desc_if = &pdata->desc_if;
48789+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48790+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48791
48792 /* Issue software reset to device */
48793 hw_if->exit(pdata);
48794diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48795index ea7a5d6..d10a742 100644
48796--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48797+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48798@@ -128,7 +128,7 @@
48799 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48800 {
48801 struct xgbe_prv_data *pdata = mii->priv;
48802- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48803+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48804 int mmd_data;
48805
48806 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48807@@ -145,7 +145,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48808 u16 mmd_val)
48809 {
48810 struct xgbe_prv_data *pdata = mii->priv;
48811- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48812+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48813 int mmd_data = mmd_val;
48814
48815 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48816@@ -161,7 +161,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48817 static void xgbe_adjust_link(struct net_device *netdev)
48818 {
48819 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48820- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48821+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48822 struct phy_device *phydev = pdata->phydev;
48823 unsigned long flags;
48824 int new_state = 0;
48825diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48826index ab06271..a560fa7 100644
48827--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48828+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48829@@ -527,8 +527,8 @@ struct xgbe_prv_data {
48830
48831 int irq_number;
48832
48833- struct xgbe_hw_if hw_if;
48834- struct xgbe_desc_if desc_if;
48835+ const struct xgbe_hw_if *hw_if;
48836+ const struct xgbe_desc_if *desc_if;
48837
48838 /* Rings for Tx/Rx on a DMA channel */
48839 struct xgbe_channel *channel;
48840@@ -611,6 +611,9 @@ struct xgbe_prv_data {
48841 #endif
48842 };
48843
48844+extern const struct xgbe_hw_if default_xgbe_hw_if;
48845+extern const struct xgbe_desc_if default_xgbe_desc_if;
48846+
48847 /* Function prototypes*/
48848
48849 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
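
The three xgbe hunks above replace embedded hw_if/desc_if structures with pointers to shared const instances, so the function-pointer tables can be placed in read-only memory and cannot be redirected at runtime. A minimal compilable sketch of the pattern; the names (hw_if, default_hw_if, prv_data) are illustrative, not the driver's:

struct hw_if {
	int (*init)(void *pdata);
	int (*exit)(void *pdata);
};

static int hw_init(void *pdata) { (void)pdata; return 0; }
static int hw_exit(void *pdata) { (void)pdata; return 0; }

/* One shared, read-only ops table replaces a writable per-device copy. */
static const struct hw_if default_hw_if = {
	.init = hw_init,
	.exit = hw_exit,
};

struct prv_data {
	const struct hw_if *hw_if;	/* was: struct hw_if hw_if; */
};

int main(void)
{
	struct prv_data pdata;

	pdata.hw_if = &default_hw_if;	/* was: init_function_ptrs(&pdata.hw_if) */
	return pdata.hw_if->exit(&pdata);
}
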
48850diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48851index 571427c..e9fe9e7 100644
48852--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48853+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48854@@ -1058,7 +1058,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48855 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48856 {
48857 /* RX_MODE controlling object */
48858- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48859+ bnx2x_init_rx_mode_obj(bp);
48860
48861 /* multicast configuration controlling object */
48862 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48863diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48864index b193604..8873bfd 100644
48865--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48866+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48867@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48868 return rc;
48869 }
48870
48871-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48872- struct bnx2x_rx_mode_obj *o)
48873+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48874 {
48875 if (CHIP_IS_E1x(bp)) {
48876- o->wait_comp = bnx2x_empty_rx_mode_wait;
48877- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48878+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48879+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48880 } else {
48881- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48882- o->config_rx_mode = bnx2x_set_rx_mode_e2;
48883+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48884+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48885 }
48886 }
48887
48888diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48889index 718ecd2..2183b2f 100644
48890--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48891+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48892@@ -1340,8 +1340,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48893
48894 /********************* RX MODE ****************/
48895
48896-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48897- struct bnx2x_rx_mode_obj *o);
48898+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48899
48900 /**
48901  * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
48902diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
48903index 461acca..2b546ba 100644
48904--- a/drivers/net/ethernet/broadcom/tg3.h
48905+++ b/drivers/net/ethernet/broadcom/tg3.h
48906@@ -150,6 +150,7 @@
48907 #define CHIPREV_ID_5750_A0 0x4000
48908 #define CHIPREV_ID_5750_A1 0x4001
48909 #define CHIPREV_ID_5750_A3 0x4003
48910+#define CHIPREV_ID_5750_C1 0x4201
48911 #define CHIPREV_ID_5750_C2 0x4202
48912 #define CHIPREV_ID_5752_A0_HW 0x5000
48913 #define CHIPREV_ID_5752_A0 0x6000
48914diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
48915index 13f9636..228040f 100644
48916--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
48917+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
48918@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
48919 }
48920
48921 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
48922- bna_cb_ioceth_enable,
48923- bna_cb_ioceth_disable,
48924- bna_cb_ioceth_hbfail,
48925- bna_cb_ioceth_reset
48926+ .enable_cbfn = bna_cb_ioceth_enable,
48927+ .disable_cbfn = bna_cb_ioceth_disable,
48928+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
48929+ .reset_cbfn = bna_cb_ioceth_reset
48930 };
48931
48932 static void bna_attr_init(struct bna_ioceth *ioceth)
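
The bna_ioceth_cbfn change above swaps positional initializers for designated ones, binding each callback to a named member so a future reordering of struct fields cannot silently wire the wrong function; the same treatment is applied to lmc_media_t and z8530_irqhandler later in this patch. A self-contained sketch with a hypothetical struct and callbacks:

#include <stdio.h>

struct ioc_cbfn {
	void (*enable_cbfn)(void);
	void (*disable_cbfn)(void);
};

static void on_enable(void)  { puts("enable"); }
static void on_disable(void) { puts("disable"); }

/* Positional form: breaks silently if the struct fields are reordered. */
static struct ioc_cbfn legacy_cbfn = { on_enable, on_disable };

/* Designated form: each callback stays bound to its named slot. */
static struct ioc_cbfn safe_cbfn = {
	.enable_cbfn  = on_enable,
	.disable_cbfn = on_disable,
};

int main(void)
{
	legacy_cbfn.enable_cbfn();
	safe_cbfn.disable_cbfn();
	return 0;
}
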
48933diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48934index 8cffcdf..aadf043 100644
48935--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48936+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48937@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
48938 */
48939 struct l2t_skb_cb {
48940 arp_failure_handler_func arp_failure_handler;
48941-};
48942+} __no_const;
48943
48944 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
48945
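
__no_const is a grsecurity annotation consumed by the constify GCC plugin, which otherwise treats structures consisting solely of function pointers as implicitly const; structures that are filled in at runtime, such as l2t_skb_cb, must opt out. A sketch in which the macro expands to nothing, so it also builds without the plugin (the struct and callback are hypothetical):

#define __no_const	/* attribute consumed by the constify plugin */

struct skb_cb {
	void (*failure_handler)(int err);
} __no_const;		/* assigned per-packet, so it must stay writable */

static void log_failure(int err) { (void)err; }

int main(void)
{
	struct skb_cb cb;

	cb.failure_handler = log_failure;	/* legal only on a non-const struct */
	cb.failure_handler(0);
	return 0;
}
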
48946diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48947index a83271c..cf00874 100644
48948--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48949+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48950@@ -2174,7 +2174,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
48951
48952 int i;
48953 struct adapter *ap = netdev2adap(dev);
48954- static const unsigned int *reg_ranges;
48955+ const unsigned int *reg_ranges;
48956 int arr_size = 0, buf_size = 0;
48957
48958 if (is_t4(ap->params.chip)) {
48959diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48960index c05b66d..ed69872 100644
48961--- a/drivers/net/ethernet/dec/tulip/de4x5.c
48962+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48963@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48964 for (i=0; i<ETH_ALEN; i++) {
48965 tmp.addr[i] = dev->dev_addr[i];
48966 }
48967- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48968+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48969 break;
48970
48971 case DE4X5_SET_HWADDR: /* Set the hardware address */
48972@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48973 spin_lock_irqsave(&lp->lock, flags);
48974 memcpy(&statbuf, &lp->pktStats, ioc->len);
48975 spin_unlock_irqrestore(&lp->lock, flags);
48976- if (copy_to_user(ioc->data, &statbuf, ioc->len))
48977+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48978 return -EFAULT;
48979 break;
48980 }
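
Both de4x5 hunks add the same guard: the ioctl-supplied length is checked against the kernel-side buffer before copy_to_user() may read past it. A userspace rendition of the check, with memcpy standing in for copy_to_user and -1 for -EFAULT:

#include <stdio.h>
#include <string.h>

static int ioctl_get_hwaddr(void *user_buf, size_t user_len)
{
	unsigned char addr[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

	if (user_len > sizeof(addr))	/* the added check: clamp to the kernel object */
		return -1;		/* -EFAULT in the driver */
	memcpy(user_buf, addr, user_len);
	return 0;
}

int main(void)
{
	unsigned char buf[64];

	printf("%d\n", ioctl_get_hwaddr(buf, sizeof(buf)));	/* rejected: 64 > 6 */
	printf("%d\n", ioctl_get_hwaddr(buf, 6));		/* allowed */
	return 0;
}
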
48981diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48982index 1e187fb..d024547 100644
48983--- a/drivers/net/ethernet/emulex/benet/be_main.c
48984+++ b/drivers/net/ethernet/emulex/benet/be_main.c
48985@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48986
48987 if (wrapped)
48988 newacc += 65536;
48989- ACCESS_ONCE(*acc) = newacc;
48990+ ACCESS_ONCE_RW(*acc) = newacc;
48991 }
48992
48993 static void populate_erx_stats(struct be_adapter *adapter,
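
Under grsecurity's constification, ACCESS_ONCE() takes a const-qualified view of its target so accidental writes through it fail to compile, and ACCESS_ONCE_RW() is the explicitly writable variant used at sites, like the accumulator above, that legitimately store through it. Simplified userspace definitions, assuming GNU __typeof__:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned int acc = 0;

	ACCESS_ONCE_RW(acc) = 65536;		/* writable variant: compiles */
	/* ACCESS_ONCE(acc) = 1; */		/* const variant: would not */
	printf("%u\n", ACCESS_ONCE(acc));	/* forced single read */
	return 0;
}
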
48994diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48995index c77fa4a..7fd42fc 100644
48996--- a/drivers/net/ethernet/faraday/ftgmac100.c
48997+++ b/drivers/net/ethernet/faraday/ftgmac100.c
48998@@ -30,6 +30,8 @@
48999 #include <linux/netdevice.h>
49000 #include <linux/phy.h>
49001 #include <linux/platform_device.h>
49002+#include <linux/interrupt.h>
49003+#include <linux/irqreturn.h>
49004 #include <net/ip.h>
49005
49006 #include "ftgmac100.h"
49007diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
49008index 4ff1adc..0ea6bf4 100644
49009--- a/drivers/net/ethernet/faraday/ftmac100.c
49010+++ b/drivers/net/ethernet/faraday/ftmac100.c
49011@@ -31,6 +31,8 @@
49012 #include <linux/module.h>
49013 #include <linux/netdevice.h>
49014 #include <linux/platform_device.h>
49015+#include <linux/interrupt.h>
49016+#include <linux/irqreturn.h>
49017
49018 #include "ftmac100.h"
49019
49020diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49021index 101f439..59e7ec6 100644
49022--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49023+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49024@@ -401,7 +401,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
49025 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
49026
49027 	/* Update the base adjustment value. */
49028- ACCESS_ONCE(pf->ptp_base_adj) = incval;
49029+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49030 smp_mb(); /* Force the above update. */
49031 }
49032
49033diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49034index 68f87ec..241dbe3 100644
49035--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49036+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49037@@ -792,7 +792,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49038 }
49039
49040 /* update the base incval used to calculate frequency adjustment */
49041- ACCESS_ONCE(adapter->base_incval) = incval;
49042+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
49043 smp_mb();
49044
49045 /* need lock to prevent incorrect read while modifying cyclecounter */
49046diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49047index 2bbd01f..e8baa64 100644
49048--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49049+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49050@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49051 struct __vxge_hw_fifo *fifo;
49052 struct vxge_hw_fifo_config *config;
49053 u32 txdl_size, txdl_per_memblock;
49054- struct vxge_hw_mempool_cbs fifo_mp_callback;
49055+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49056+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49057+ };
49058+
49059 struct __vxge_hw_virtualpath *vpath;
49060
49061 if ((vp == NULL) || (attr == NULL)) {
49062@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49063 goto exit;
49064 }
49065
49066- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49067-
49068 fifo->mempool =
49069 __vxge_hw_mempool_create(vpath->hldev,
49070 fifo->config->memblock_size,
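
The vxge hunks above are the inverse of the cxgb4 change a few files earlier: a callback table that never changes becomes static const with designated initializers, letting it live in .rodata, while in cxgb4 a function-local static pointer that only caches a per-call decision loses static so concurrent calls stop sharing writable state. Both moves in miniature, with hypothetical names:

#include <stdio.h>

struct mempool_cbs {
	void (*item_func_alloc)(void);
};

static void item_alloc_impl(void) { puts("alloc"); }

static void fifo_create(void)
{
	/* Read-only, shared, initialised at compile time: no per-call setup. */
	static const struct mempool_cbs fifo_mp_callback = {
		.item_func_alloc = item_alloc_impl,
	};

	fifo_mp_callback.item_func_alloc();
}

static void get_regs(int is_t4)
{
	static const unsigned int t4_ranges[] = { 0x1000, 0x2000 };
	static const unsigned int t5_ranges[] = { 0x3000, 0x4000 };
	const unsigned int *reg_ranges;	/* was 'static': shared, racy state */

	reg_ranges = is_t4 ? t4_ranges : t5_ranges;
	printf("%#x\n", reg_ranges[0]);
}

int main(void)
{
	fifo_create();
	get_regs(1);
	return 0;
}
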
49071diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49072index 73e6683..464e910 100644
49073--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49074+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49075@@ -120,6 +120,10 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
49076 int data);
49077 static void pch_gbe_set_multi(struct net_device *netdev);
49078
49079+static struct sock_filter ptp_filter[] = {
49080+ PTP_FILTER
49081+};
49082+
49083 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49084 {
49085 u8 *data = skb->data;
49086@@ -127,7 +131,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49087 u16 *hi, *id;
49088 u32 lo;
49089
49090- if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
49091+ if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
49092 return 0;
49093
49094 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
49095@@ -2631,6 +2635,11 @@ static int pch_gbe_probe(struct pci_dev *pdev,
49096
49097 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
49098 PCI_DEVFN(12, 4));
49099+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49100+ dev_err(&pdev->dev, "Bad ptp filter\n");
49101+ ret = -EINVAL;
49102+ goto err_free_netdev;
49103+ }
49104
49105 netdev->netdev_ops = &pch_gbe_netdev_ops;
49106 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
49107diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49108index f33559b..c7f50ac 100644
49109--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49110+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49111@@ -2176,7 +2176,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49112 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49113 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49114 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49115- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49116+ pax_open_kernel();
49117+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49118+ pax_close_kernel();
49119 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49120 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49121 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49122diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49123index be7d7a6..a8983f8 100644
49124--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49125+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49126@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49127 case QLCNIC_NON_PRIV_FUNC:
49128 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49129 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49130- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49131+ pax_open_kernel();
49132+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49133+ pax_close_kernel();
49134 break;
49135 case QLCNIC_PRIV_FUNC:
49136 ahw->op_mode = QLCNIC_PRIV_FUNC;
49137 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49138- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49139+ pax_open_kernel();
49140+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49141+ pax_close_kernel();
49142 break;
49143 case QLCNIC_MGMT_FUNC:
49144 ahw->op_mode = QLCNIC_MGMT_FUNC;
49145 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49146- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49147+ pax_open_kernel();
49148+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49149+ pax_close_kernel();
49150 break;
49151 default:
49152 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
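
With nic_ops constified, the remaining legitimate runtime assignments of init_driver are bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection, and the cast through void ** strips the const qualifier the plugin added. A conceptual, compilable sketch with no-op stubs standing in for the PaX primitives:

struct nic_ops {
	int (*init_driver)(void *ahw);
};	/* under the constify plugin this struct becomes implicitly const */

static int init_mgmt_vnic(void *ahw) { (void)ahw; return 0; }

static void pax_open_kernel(void)  { /* stub: briefly allow kernel writes */ }
static void pax_close_kernel(void) { /* stub: restore write protection */ }

static struct nic_ops ops;	/* placed read-only when the plugin is active */

int main(void)
{
	pax_open_kernel();
	*(void **)&ops.init_driver = (void *)init_mgmt_vnic;	/* strip const */
	pax_close_kernel();
	return ops.init_driver(0);
}
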
49153diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49154index e46fc39..abe135b 100644
49155--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49156+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49157@@ -1228,7 +1228,7 @@ flash_temp:
49158 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49159 {
49160 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49161- static const struct qlcnic_dump_operations *fw_dump_ops;
49162+ const struct qlcnic_dump_operations *fw_dump_ops;
49163 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49164 u32 entry_offset, dump, no_entries, buf_offset = 0;
49165 int i, k, ops_cnt, ops_index, dump_size = 0;
49166diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49167index 61623e9..ac97c27 100644
49168--- a/drivers/net/ethernet/realtek/r8169.c
49169+++ b/drivers/net/ethernet/realtek/r8169.c
49170@@ -759,22 +759,22 @@ struct rtl8169_private {
49171 struct mdio_ops {
49172 void (*write)(struct rtl8169_private *, int, int);
49173 int (*read)(struct rtl8169_private *, int);
49174- } mdio_ops;
49175+ } __no_const mdio_ops;
49176
49177 struct pll_power_ops {
49178 void (*down)(struct rtl8169_private *);
49179 void (*up)(struct rtl8169_private *);
49180- } pll_power_ops;
49181+ } __no_const pll_power_ops;
49182
49183 struct jumbo_ops {
49184 void (*enable)(struct rtl8169_private *);
49185 void (*disable)(struct rtl8169_private *);
49186- } jumbo_ops;
49187+ } __no_const jumbo_ops;
49188
49189 struct csi_ops {
49190 void (*write)(struct rtl8169_private *, int, int);
49191 u32 (*read)(struct rtl8169_private *, int);
49192- } csi_ops;
49193+ } __no_const csi_ops;
49194
49195 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49196 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49197diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49198index 6b861e3..204ac86 100644
49199--- a/drivers/net/ethernet/sfc/ptp.c
49200+++ b/drivers/net/ethernet/sfc/ptp.c
49201@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49202 ptp->start.dma_addr);
49203
49204 /* Clear flag that signals MC ready */
49205- ACCESS_ONCE(*start) = 0;
49206+ ACCESS_ONCE_RW(*start) = 0;
49207 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49208 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49209 EFX_BUG_ON_PARANOID(rc);
49210diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49211index 50617c5..b13724c 100644
49212--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49213+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49214@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49215
49216 writel(value, ioaddr + MMC_CNTRL);
49217
49218- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49219- MMC_CNTRL, value);
49220+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49221+// MMC_CNTRL, value);
49222 }
49223
49224 /* To mask all interrupts. */
49225diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
49226index 6b56f85..50e285f 100644
49227--- a/drivers/net/ethernet/ti/cpts.c
49228+++ b/drivers/net/ethernet/ti/cpts.c
49229@@ -33,6 +33,10 @@
49230
49231 #ifdef CONFIG_TI_CPTS
49232
49233+static struct sock_filter ptp_filter[] = {
49234+ PTP_FILTER
49235+};
49236+
49237 #define cpts_read32(c, r) __raw_readl(&c->reg->r)
49238 #define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
49239
49240@@ -296,7 +300,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
49241 u64 ns = 0;
49242 struct cpts_event *event;
49243 struct list_head *this, *next;
49244- unsigned int class = ptp_classify_raw(skb);
49245+ unsigned int class = sk_run_filter(skb, ptp_filter);
49246 unsigned long flags;
49247 u16 seqid;
49248 u8 mtype;
49249@@ -367,6 +371,10 @@ int cpts_register(struct device *dev, struct cpts *cpts,
49250 int err, i;
49251 unsigned long flags;
49252
49253+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49254+ pr_err("cpts: bad ptp filter\n");
49255+ return -EINVAL;
49256+ }
49257 cpts->info = cpts_info;
49258 cpts->clock = ptp_clock_register(&cpts->info, dev);
49259 if (IS_ERR(cpts->clock)) {
49260diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
49261index b81bc9f..3f43101 100644
49262--- a/drivers/net/ethernet/xscale/Kconfig
49263+++ b/drivers/net/ethernet/xscale/Kconfig
49264@@ -23,7 +23,6 @@ config IXP4XX_ETH
49265 tristate "Intel IXP4xx Ethernet support"
49266 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
49267 select PHYLIB
49268- select NET_PTP_CLASSIFY
49269 ---help---
49270 Say Y here if you want to use built-in Ethernet ports
49271 on IXP4xx processor.
49272diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
49273index f7e0f0f..25283f1 100644
49274--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
49275+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
49276@@ -256,6 +256,10 @@ static int ports_open;
49277 static struct port *npe_port_tab[MAX_NPES];
49278 static struct dma_pool *dma_pool;
49279
49280+static struct sock_filter ptp_filter[] = {
49281+ PTP_FILTER
49282+};
49283+
49284 static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49285 {
49286 u8 *data = skb->data;
49287@@ -263,7 +267,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49288 u16 *hi, *id;
49289 u32 lo;
49290
49291- if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
49292+ if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
49293 return 0;
49294
49295 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
49296@@ -1409,6 +1413,11 @@ static int eth_init_one(struct platform_device *pdev)
49297 char phy_id[MII_BUS_ID_SIZE + 3];
49298 int err;
49299
49300+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49301+ pr_err("ixp4xx_eth: bad ptp filter\n");
49302+ return -EINVAL;
49303+ }
49304+
49305 if (!(dev = alloc_etherdev(sizeof(struct port))))
49306 return -ENOMEM;
49307
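
pch_gbe, cpts and ixp4xx_eth all receive the same treatment: the ptp_classify_raw() call is reverted to a classic-BPF sk_run_filter() over a driver-local PTP_FILTER program, validated once at probe time. A compilable sketch of the shape of that pattern; the sock_filter layout, the stub init/run functions and the trivial filter all stand in for kernel symbols:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a)  (sizeof(a) / sizeof((a)[0]))
#define PTP_CLASS_NONE 0

struct sock_filter { unsigned short code; unsigned char jt, jf; unsigned int k; };
struct sk_buff { const unsigned char *data; size_t len; };

/* One-time validation at probe time; sk_chk_filter() in the real kernel. */
static int ptp_filter_init(const struct sock_filter *f, size_t len)
{
	return (f == NULL || len == 0) ? -1 : 0;
}

/* Stand-in for sk_run_filter(): run the program over the packet. */
static unsigned int sk_run_filter(const struct sk_buff *skb,
				  const struct sock_filter *f)
{
	(void)f;
	return skb->len >= 42 ? 1 : PTP_CLASS_NONE;	/* toy classification */
}

static struct sock_filter ptp_filter[] = { { 0x06, 0, 0, 0 } };	/* BPF_RET */

int main(void)
{
	struct sk_buff skb = { (const unsigned char *)"", 0 };

	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
		return 1;	/* the drivers fail probe with -EINVAL here */

	if (sk_run_filter(&skb, ptp_filter) == PTP_CLASS_NONE)
		puts("not a PTP frame");
	return 0;
}
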
49308diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49309index 6cc37c1..fdd9d77 100644
49310--- a/drivers/net/hyperv/hyperv_net.h
49311+++ b/drivers/net/hyperv/hyperv_net.h
49312@@ -170,7 +170,7 @@ struct rndis_device {
49313
49314 enum rndis_device_state state;
49315 bool link_state;
49316- atomic_t new_req_id;
49317+ atomic_unchecked_t new_req_id;
49318
49319 spinlock_t request_lock;
49320 struct list_head req_list;
49321diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49322index 99c527a..6a2ce38 100644
49323--- a/drivers/net/hyperv/rndis_filter.c
49324+++ b/drivers/net/hyperv/rndis_filter.c
49325@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49326 * template
49327 */
49328 set = &rndis_msg->msg.set_req;
49329- set->req_id = atomic_inc_return(&dev->new_req_id);
49330+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49331
49332 /* Add to the request list */
49333 spin_lock_irqsave(&dev->request_lock, flags);
49334@@ -930,7 +930,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49335
49336 /* Setup the rndis set */
49337 halt = &request->request_msg.msg.halt_req;
49338- halt->req_id = atomic_inc_return(&dev->new_req_id);
49339+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49340
49341 /* Ignore return since this msg is optional. */
49342 rndis_filter_send_request(dev, request);
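
PaX's REFCOUNT hardening makes atomic_t saturate rather than wrap on overflow; counters that are plain ID generators rather than reference counts, such as new_req_id here, become atomic_unchecked_t, since wraparound is harmless and saturation would be a bug. A userspace sketch with a GCC builtin standing in for the kernel helper:

#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

/* No overflow trap: wraparound of a request id is benign. */
static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return __sync_add_and_fetch(&v->counter, 1);
}

static atomic_unchecked_t new_req_id = { 0 };

int main(void)
{
	printf("req %d\n", atomic_inc_return_unchecked(&new_req_id));
	printf("req %d\n", atomic_inc_return_unchecked(&new_req_id));
	return 0;
}
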
49343diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
49344index 78f18be..1d19c62 100644
49345--- a/drivers/net/ieee802154/fakehard.c
49346+++ b/drivers/net/ieee802154/fakehard.c
49347@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
49348 phy->transmit_power = 0xbf;
49349
49350 dev->netdev_ops = &fake_ops;
49351- dev->ml_priv = &fake_mlme;
49352+ dev->ml_priv = (void *)&fake_mlme;
49353
49354 priv = netdev_priv(dev);
49355 priv->phy = phy;
49356diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49357index ef8a5c2..76877d6 100644
49358--- a/drivers/net/macvlan.c
49359+++ b/drivers/net/macvlan.c
49360@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49361 free_nskb:
49362 kfree_skb(nskb);
49363 err:
49364- atomic_long_inc(&skb->dev->rx_dropped);
49365+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49366 }
49367
49368 /* called under rcu_read_lock() from netif_receive_skb */
49369@@ -1134,13 +1134,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49370 int macvlan_link_register(struct rtnl_link_ops *ops)
49371 {
49372 /* common fields */
49373- ops->priv_size = sizeof(struct macvlan_dev);
49374- ops->validate = macvlan_validate;
49375- ops->maxtype = IFLA_MACVLAN_MAX;
49376- ops->policy = macvlan_policy;
49377- ops->changelink = macvlan_changelink;
49378- ops->get_size = macvlan_get_size;
49379- ops->fill_info = macvlan_fill_info;
49380+ pax_open_kernel();
49381+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49382+ *(void **)&ops->validate = macvlan_validate;
49383+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49384+ *(const void **)&ops->policy = macvlan_policy;
49385+ *(void **)&ops->changelink = macvlan_changelink;
49386+ *(void **)&ops->get_size = macvlan_get_size;
49387+ *(void **)&ops->fill_info = macvlan_fill_info;
49388+ pax_close_kernel();
49389
49390 return rtnl_link_register(ops);
49391 };
49392@@ -1220,7 +1222,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49393 return NOTIFY_DONE;
49394 }
49395
49396-static struct notifier_block macvlan_notifier_block __read_mostly = {
49397+static struct notifier_block macvlan_notifier_block = {
49398 .notifier_call = macvlan_device_event,
49399 };
49400
49401diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49402index 3381c4f..dea5fd5 100644
49403--- a/drivers/net/macvtap.c
49404+++ b/drivers/net/macvtap.c
49405@@ -1020,7 +1020,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49406 }
49407
49408 ret = 0;
49409- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49410+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49411 put_user(q->flags, &ifr->ifr_flags))
49412 ret = -EFAULT;
49413 macvtap_put_vlan(vlan);
49414@@ -1190,7 +1190,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49415 return NOTIFY_DONE;
49416 }
49417
49418-static struct notifier_block macvtap_notifier_block __read_mostly = {
49419+static struct notifier_block macvtap_notifier_block = {
49420 .notifier_call = macvtap_device_event,
49421 };
49422
49423diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
49424index 9408157..d53b924 100644
49425--- a/drivers/net/phy/dp83640.c
49426+++ b/drivers/net/phy/dp83640.c
49427@@ -27,7 +27,6 @@
49428 #include <linux/module.h>
49429 #include <linux/net_tstamp.h>
49430 #include <linux/netdevice.h>
49431-#include <linux/if_vlan.h>
49432 #include <linux/phy.h>
49433 #include <linux/ptp_classify.h>
49434 #include <linux/ptp_clock_kernel.h>
49435diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49436index d5b77ef..72ff14b 100644
49437--- a/drivers/net/ppp/ppp_generic.c
49438+++ b/drivers/net/ppp/ppp_generic.c
49439@@ -143,8 +143,9 @@ struct ppp {
49440 struct sk_buff_head mrq; /* MP: receive reconstruction queue */
49441 #endif /* CONFIG_PPP_MULTILINK */
49442 #ifdef CONFIG_PPP_FILTER
49443- struct sk_filter *pass_filter; /* filter for packets to pass */
49444- struct sk_filter *active_filter;/* filter for pkts to reset idle */
49445+ struct sock_filter *pass_filter; /* filter for packets to pass */
49446+ struct sock_filter *active_filter;/* filter for pkts to reset idle */
49447+ unsigned pass_len, active_len;
49448 #endif /* CONFIG_PPP_FILTER */
49449 struct net *ppp_net; /* the net we belong to */
49450 struct ppp_link_stats stats64; /* 64 bit network stats */
49451@@ -539,7 +540,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
49452 {
49453 struct sock_fprog uprog;
49454 struct sock_filter *code = NULL;
49455- int len;
49456+ int len, err;
49457
49458 if (copy_from_user(&uprog, arg, sizeof(uprog)))
49459 return -EFAULT;
49460@@ -554,6 +555,12 @@ static int get_filter(void __user *arg, struct sock_filter **p)
49461 if (IS_ERR(code))
49462 return PTR_ERR(code);
49463
49464+ err = sk_chk_filter(code, uprog.len);
49465+ if (err) {
49466+ kfree(code);
49467+ return err;
49468+ }
49469+
49470 *p = code;
49471 return uprog.len;
49472 }
49473@@ -748,52 +755,28 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
49474 case PPPIOCSPASS:
49475 {
49476 struct sock_filter *code;
49477-
49478 err = get_filter(argp, &code);
49479 if (err >= 0) {
49480- struct sock_fprog_kern fprog = {
49481- .len = err,
49482- .filter = code,
49483- };
49484-
49485 ppp_lock(ppp);
49486- if (ppp->pass_filter) {
49487- sk_unattached_filter_destroy(ppp->pass_filter);
49488- ppp->pass_filter = NULL;
49489- }
49490- if (fprog.filter != NULL)
49491- err = sk_unattached_filter_create(&ppp->pass_filter,
49492- &fprog);
49493- else
49494- err = 0;
49495- kfree(code);
49496+ kfree(ppp->pass_filter);
49497+ ppp->pass_filter = code;
49498+ ppp->pass_len = err;
49499 ppp_unlock(ppp);
49500+ err = 0;
49501 }
49502 break;
49503 }
49504 case PPPIOCSACTIVE:
49505 {
49506 struct sock_filter *code;
49507-
49508 err = get_filter(argp, &code);
49509 if (err >= 0) {
49510- struct sock_fprog_kern fprog = {
49511- .len = err,
49512- .filter = code,
49513- };
49514-
49515 ppp_lock(ppp);
49516- if (ppp->active_filter) {
49517- sk_unattached_filter_destroy(ppp->active_filter);
49518- ppp->active_filter = NULL;
49519- }
49520- if (fprog.filter != NULL)
49521- err = sk_unattached_filter_create(&ppp->active_filter,
49522- &fprog);
49523- else
49524- err = 0;
49525- kfree(code);
49526+ kfree(ppp->active_filter);
49527+ ppp->active_filter = code;
49528+ ppp->active_len = err;
49529 ppp_unlock(ppp);
49530+ err = 0;
49531 }
49532 break;
49533 }
49534@@ -1201,7 +1184,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
49535 a four-byte PPP header on each packet */
49536 *skb_push(skb, 2) = 1;
49537 if (ppp->pass_filter &&
49538- SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
49539+ sk_run_filter(skb, ppp->pass_filter) == 0) {
49540 if (ppp->debug & 1)
49541 netdev_printk(KERN_DEBUG, ppp->dev,
49542 "PPP: outbound frame "
49543@@ -1211,7 +1194,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
49544 }
49545 /* if this packet passes the active filter, record the time */
49546 if (!(ppp->active_filter &&
49547- SK_RUN_FILTER(ppp->active_filter, skb) == 0))
49548+ sk_run_filter(skb, ppp->active_filter) == 0))
49549 ppp->last_xmit = jiffies;
49550 skb_pull(skb, 2);
49551 #else
49552@@ -1835,7 +1818,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
49553
49554 *skb_push(skb, 2) = 0;
49555 if (ppp->pass_filter &&
49556- SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
49557+ sk_run_filter(skb, ppp->pass_filter) == 0) {
49558 if (ppp->debug & 1)
49559 netdev_printk(KERN_DEBUG, ppp->dev,
49560 "PPP: inbound frame "
49561@@ -1844,7 +1827,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
49562 return;
49563 }
49564 if (!(ppp->active_filter &&
49565- SK_RUN_FILTER(ppp->active_filter, skb) == 0))
49566+ sk_run_filter(skb, ppp->active_filter) == 0))
49567 ppp->last_recv = jiffies;
49568 __skb_pull(skb, 2);
49569 } else
49570@@ -2689,10 +2672,6 @@ ppp_create_interface(struct net *net, int unit, int *retp)
49571 ppp->minseq = -1;
49572 skb_queue_head_init(&ppp->mrq);
49573 #endif /* CONFIG_PPP_MULTILINK */
49574-#ifdef CONFIG_PPP_FILTER
49575- ppp->pass_filter = NULL;
49576- ppp->active_filter = NULL;
49577-#endif /* CONFIG_PPP_FILTER */
49578
49579 /*
49580 * drum roll: don't forget to set
49581@@ -2823,15 +2802,10 @@ static void ppp_destroy_interface(struct ppp *ppp)
49582 skb_queue_purge(&ppp->mrq);
49583 #endif /* CONFIG_PPP_MULTILINK */
49584 #ifdef CONFIG_PPP_FILTER
49585- if (ppp->pass_filter) {
49586- sk_unattached_filter_destroy(ppp->pass_filter);
49587- ppp->pass_filter = NULL;
49588- }
49589-
49590- if (ppp->active_filter) {
49591- sk_unattached_filter_destroy(ppp->active_filter);
49592- ppp->active_filter = NULL;
49593- }
49594+ kfree(ppp->pass_filter);
49595+ ppp->pass_filter = NULL;
49596+ kfree(ppp->active_filter);
49597+ ppp->active_filter = NULL;
49598 #endif /* CONFIG_PPP_FILTER */
49599
49600 kfree_skb(ppp->xmit_pending);
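
The ppp_generic changes revert the pass/active filters to classic BPF, which makes the sk_chk_filter() call added to get_filter() the only validation the user-supplied program receives, so it must run before the program is stored. A userspace rendition of that copy, validate, or free flow; sk_chk_filter is reduced to a length check for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sock_filter { unsigned short code; unsigned char jt, jf; unsigned int k; };

/* Stub: the real sk_chk_filter() verifies opcodes, jumps and termination. */
static int sk_chk_filter(const struct sock_filter *f, unsigned int len)
{
	(void)f;
	return (len == 0 || len > 4096) ? -1 : 0;
}

static int get_filter(const struct sock_filter *uprog, unsigned int len,
		      struct sock_filter **p)
{
	struct sock_filter *code;
	int err;

	code = malloc(len * sizeof(*code));	/* memdup_user() in-kernel */
	if (!code)
		return -1;
	memcpy(code, uprog, len * sizeof(*code));

	err = sk_chk_filter(code, len);		/* the validation the patch adds */
	if (err) {
		free(code);			/* reject before anything stores it */
		return err;
	}
	*p = code;
	return (int)len;
}

int main(void)
{
	struct sock_filter prog[2] = { { 0x06, 0, 0, 0 }, { 0x06, 0, 0, 0 } };
	struct sock_filter *out = NULL;

	printf("%d\n", get_filter(prog, 2, &out));
	free(out);
	return 0;
}
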
49601diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49602index 1252d9c..80e660b 100644
49603--- a/drivers/net/slip/slhc.c
49604+++ b/drivers/net/slip/slhc.c
49605@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49606 register struct tcphdr *thp;
49607 register struct iphdr *ip;
49608 register struct cstate *cs;
49609- int len, hdrlen;
49610+ long len, hdrlen;
49611 unsigned char *cp = icp;
49612
49613 /* We've got a compressed packet; read the change byte */
49614diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49615index b4958c7..277cb96 100644
49616--- a/drivers/net/team/team.c
49617+++ b/drivers/net/team/team.c
49618@@ -2868,7 +2868,7 @@ static int team_device_event(struct notifier_block *unused,
49619 return NOTIFY_DONE;
49620 }
49621
49622-static struct notifier_block team_notifier_block __read_mostly = {
49623+static struct notifier_block team_notifier_block = {
49624 .notifier_call = team_device_event,
49625 };
49626
49627diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
49628index a58dfeb..dbde341 100644
49629--- a/drivers/net/team/team_mode_loadbalance.c
49630+++ b/drivers/net/team/team_mode_loadbalance.c
49631@@ -49,7 +49,7 @@ struct lb_port_mapping {
49632 struct lb_priv_ex {
49633 struct team *team;
49634 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
49635- struct sock_fprog_kern *orig_fprog;
49636+ struct sock_fprog *orig_fprog;
49637 struct {
49638 unsigned int refresh_interval; /* in tenths of second */
49639 struct delayed_work refresh_dw;
49640@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
49641 return 0;
49642 }
49643
49644-static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
49645+static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
49646 const void *data)
49647 {
49648- struct sock_fprog_kern *fprog;
49649+ struct sock_fprog *fprog;
49650 struct sock_filter *filter = (struct sock_filter *) data;
49651
49652 if (data_len % sizeof(struct sock_filter))
49653 return -EINVAL;
49654- fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
49655+ fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
49656 if (!fprog)
49657 return -ENOMEM;
49658 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
49659@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
49660 return 0;
49661 }
49662
49663-static void __fprog_destroy(struct sock_fprog_kern *fprog)
49664+static void __fprog_destroy(struct sock_fprog *fprog)
49665 {
49666 kfree(fprog->filter);
49667 kfree(fprog);
49668@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
49669 struct lb_priv *lb_priv = get_lb_priv(team);
49670 struct sk_filter *fp = NULL;
49671 struct sk_filter *orig_fp;
49672- struct sock_fprog_kern *fprog = NULL;
49673+ struct sock_fprog *fprog = NULL;
49674 int err;
49675
49676 if (ctx->data.bin_val.len) {
49677diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49678index 98bad1f..f197d7a 100644
49679--- a/drivers/net/tun.c
49680+++ b/drivers/net/tun.c
49681@@ -1854,7 +1854,7 @@ unlock:
49682 }
49683
49684 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49685- unsigned long arg, int ifreq_len)
49686+ unsigned long arg, size_t ifreq_len)
49687 {
49688 struct tun_file *tfile = file->private_data;
49689 struct tun_struct *tun;
49690@@ -1867,6 +1867,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49691 unsigned int ifindex;
49692 int ret;
49693
49694+ if (ifreq_len > sizeof ifr)
49695+ return -EFAULT;
49696+
49697 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49698 if (copy_from_user(&ifr, argp, ifreq_len))
49699 return -EFAULT;
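
The __tun_chr_ioctl() change makes the length parameter unsigned (size_t) and caps it at sizeof(ifr) before any copy, closing both the negative-length and the oversized-length cases at once. An illustrative userspace rendition with a hypothetical ifreq_like struct:

#include <stdio.h>
#include <string.h>

struct ifreq_like { char name[16]; int flags; };	/* stand-in for struct ifreq */

static long chr_ioctl(const void *user_arg, size_t ifreq_len)
{
	struct ifreq_like ifr;

	if (ifreq_len > sizeof(ifr))	/* added guard: never overfill ifr */
		return -14;		/* -EFAULT */
	memcpy(&ifr, user_arg, ifreq_len);	/* copy_from_user() in-kernel */
	return 0;
}

int main(void)
{
	char arg[64] = { 0 };

	printf("%ld\n", chr_ioctl(arg, sizeof(arg)));	/* rejected: larger than ifr */
	printf("%ld\n", chr_ioctl(arg, 16));		/* accepted */
	return 0;
}
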
49700diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49701index a4272ed..cdd69ff 100644
49702--- a/drivers/net/usb/hso.c
49703+++ b/drivers/net/usb/hso.c
49704@@ -71,7 +71,7 @@
49705 #include <asm/byteorder.h>
49706 #include <linux/serial_core.h>
49707 #include <linux/serial.h>
49708-
49709+#include <asm/local.h>
49710
49711 #define MOD_AUTHOR "Option Wireless"
49712 #define MOD_DESCRIPTION "USB High Speed Option driver"
49713@@ -1177,7 +1177,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49714 struct urb *urb;
49715
49716 urb = serial->rx_urb[0];
49717- if (serial->port.count > 0) {
49718+ if (atomic_read(&serial->port.count) > 0) {
49719 count = put_rxbuf_data(urb, serial);
49720 if (count == -1)
49721 return;
49722@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49723 DUMP1(urb->transfer_buffer, urb->actual_length);
49724
49725 /* Anyone listening? */
49726- if (serial->port.count == 0)
49727+ if (atomic_read(&serial->port.count) == 0)
49728 return;
49729
49730 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49731@@ -1277,8 +1277,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49732 tty_port_tty_set(&serial->port, tty);
49733
49734 /* check for port already opened, if not set the termios */
49735- serial->port.count++;
49736- if (serial->port.count == 1) {
49737+ if (atomic_inc_return(&serial->port.count) == 1) {
49738 serial->rx_state = RX_IDLE;
49739 /* Force default termio settings */
49740 _hso_serial_set_termios(tty, NULL);
49741@@ -1288,7 +1287,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49742 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49743 if (result) {
49744 hso_stop_serial_device(serial->parent);
49745- serial->port.count--;
49746+ atomic_dec(&serial->port.count);
49747 kref_put(&serial->parent->ref, hso_serial_ref_free);
49748 }
49749 } else {
49750@@ -1325,10 +1324,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49751
49752 /* reset the rts and dtr */
49753 /* do the actual close */
49754- serial->port.count--;
49755+ atomic_dec(&serial->port.count);
49756
49757- if (serial->port.count <= 0) {
49758- serial->port.count = 0;
49759+ if (atomic_read(&serial->port.count) <= 0) {
49760+ atomic_set(&serial->port.count, 0);
49761 tty_port_tty_set(&serial->port, NULL);
49762 if (!usb_gone)
49763 hso_stop_serial_device(serial->parent);
49764@@ -1403,7 +1402,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49765
49766 /* the actual setup */
49767 spin_lock_irqsave(&serial->serial_lock, flags);
49768- if (serial->port.count)
49769+ if (atomic_read(&serial->port.count))
49770 _hso_serial_set_termios(tty, old);
49771 else
49772 tty->termios = *old;
49773@@ -1872,7 +1871,7 @@ static void intr_callback(struct urb *urb)
49774 D1("Pending read interrupt on port %d\n", i);
49775 spin_lock(&serial->serial_lock);
49776 if (serial->rx_state == RX_IDLE &&
49777- serial->port.count > 0) {
49778+ atomic_read(&serial->port.count) > 0) {
49779 /* Setup and send a ctrl req read on
49780 * port i */
49781 if (!serial->rx_urb_filled[0]) {
49782@@ -3045,7 +3044,7 @@ static int hso_resume(struct usb_interface *iface)
49783 /* Start all serial ports */
49784 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49785 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49786- if (dev2ser(serial_table[i])->port.count) {
49787+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49788 result =
49789 hso_start_serial_device(serial_table[i], GFP_NOIO);
49790 hso_kick_transmit(dev2ser(serial_table[i]));
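
The hso changes convert the tty port's open count from a plain integer into an atomic_t so concurrent open/close paths cannot lose updates in the read-modify-write; note how the open path folds the increment and the == 1 test into a single atomic_inc_return(). A userspace sketch using GCC builtins in place of the kernel atomics:

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static int  atomic_inc_return(atomic_t *v) { return __sync_add_and_fetch(&v->counter, 1); }
static void atomic_dec(atomic_t *v)        { __sync_sub_and_fetch(&v->counter, 1); }
static int  atomic_read(atomic_t *v)       { return v->counter; }

static atomic_t port_count = { 0 };

static void serial_open(void)
{
	/* was: port.count++; if (port.count == 1) ...  (two racy steps) */
	if (atomic_inc_return(&port_count) == 1)
		puts("first open: start the device");
}

static void serial_close(void)
{
	atomic_dec(&port_count);
	if (atomic_read(&port_count) <= 0)
		puts("last close: stop the device");
}

int main(void)
{
	serial_open();
	serial_close();
	return 0;
}
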
49791diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49792index 3eab74c..fb6097c 100644
49793--- a/drivers/net/usb/r8152.c
49794+++ b/drivers/net/usb/r8152.c
49795@@ -567,7 +567,7 @@ struct r8152 {
49796 void (*up)(struct r8152 *);
49797 void (*down)(struct r8152 *);
49798 void (*unload)(struct r8152 *);
49799- } rtl_ops;
49800+ } __no_const rtl_ops;
49801
49802 int intr_interval;
49803 u32 saved_wolopts;
49804diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49805index a2515887..6d13233 100644
49806--- a/drivers/net/usb/sierra_net.c
49807+++ b/drivers/net/usb/sierra_net.c
49808@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49809 /* atomic counter partially included in MAC address to make sure 2 devices
49810 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49811 */
49812-static atomic_t iface_counter = ATOMIC_INIT(0);
49813+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49814
49815 /*
49816 * SYNC Timer Delay definition used to set the expiry time
49817@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49818 dev->net->netdev_ops = &sierra_net_device_ops;
49819
49820 /* change MAC addr to include, ifacenum, and to be unique */
49821- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49822+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49823 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49824
49825 /* we will have to manufacture ethernet headers, prepare template */
49826diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49827index 7d9f84a..7f690da 100644
49828--- a/drivers/net/virtio_net.c
49829+++ b/drivers/net/virtio_net.c
49830@@ -47,7 +47,7 @@ module_param(gso, bool, 0444);
49831 #define RECEIVE_AVG_WEIGHT 64
49832
49833 /* Minimum alignment for mergeable packet buffers. */
49834-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49835+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49836
49837 #define VIRTNET_DRIVER_VERSION "1.0.0"
49838
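
The 256 -> 256UL change matters because the kernel's max() macro rejects mixed-type comparisons at compile time, and L1_CACHE_BYTES is unsigned long. A minimal rendition of the type-checked macro showing why both operands must agree:

#include <stdio.h>

/* Simplified form of the kernel's type-checked max(): the pointer
 * comparison draws a compiler diagnostic when x and y differ in type. */
#define max(x, y) ({				\
	__typeof__(x) _x = (x);			\
	__typeof__(y) _y = (y);			\
	(void)(&_x == &_y);			\
	_x > _y ? _x : _y; })

int main(void)
{
	unsigned long cache_bytes = 64UL;	/* stand-in for L1_CACHE_BYTES */

	/* max(cache_bytes, 256) would compare unsigned long * with int *;
	 * the UL suffix keeps both operands the same type. */
	printf("%lu\n", max(cache_bytes, 256UL));
	return 0;
}
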
49839diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49840index 9f79192..838cf95 100644
49841--- a/drivers/net/vxlan.c
49842+++ b/drivers/net/vxlan.c
49843@@ -2838,7 +2838,7 @@ nla_put_failure:
49844 return -EMSGSIZE;
49845 }
49846
49847-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49848+static struct rtnl_link_ops vxlan_link_ops = {
49849 .kind = "vxlan",
49850 .maxtype = IFLA_VXLAN_MAX,
49851 .policy = vxlan_policy,
49852@@ -2885,7 +2885,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49853 return NOTIFY_DONE;
49854 }
49855
49856-static struct notifier_block vxlan_notifier_block __read_mostly = {
49857+static struct notifier_block vxlan_notifier_block = {
49858 .notifier_call = vxlan_lowerdev_event,
49859 };
49860
49861diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49862index 5920c99..ff2e4a5 100644
49863--- a/drivers/net/wan/lmc/lmc_media.c
49864+++ b/drivers/net/wan/lmc/lmc_media.c
49865@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49866 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49867
49868 lmc_media_t lmc_ds3_media = {
49869- lmc_ds3_init, /* special media init stuff */
49870- lmc_ds3_default, /* reset to default state */
49871- lmc_ds3_set_status, /* reset status to state provided */
49872- lmc_dummy_set_1, /* set clock source */
49873- lmc_dummy_set2_1, /* set line speed */
49874- lmc_ds3_set_100ft, /* set cable length */
49875- lmc_ds3_set_scram, /* set scrambler */
49876- lmc_ds3_get_link_status, /* get link status */
49877- lmc_dummy_set_1, /* set link status */
49878- lmc_ds3_set_crc_length, /* set CRC length */
49879- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49880- lmc_ds3_watchdog
49881+ .init = lmc_ds3_init, /* special media init stuff */
49882+ .defaults = lmc_ds3_default, /* reset to default state */
49883+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49884+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49885+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49886+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49887+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49888+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49889+ .set_link_status = lmc_dummy_set_1, /* set link status */
49890+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49891+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49892+ .watchdog = lmc_ds3_watchdog
49893 };
49894
49895 lmc_media_t lmc_hssi_media = {
49896- lmc_hssi_init, /* special media init stuff */
49897- lmc_hssi_default, /* reset to default state */
49898- lmc_hssi_set_status, /* reset status to state provided */
49899- lmc_hssi_set_clock, /* set clock source */
49900- lmc_dummy_set2_1, /* set line speed */
49901- lmc_dummy_set_1, /* set cable length */
49902- lmc_dummy_set_1, /* set scrambler */
49903- lmc_hssi_get_link_status, /* get link status */
49904- lmc_hssi_set_link_status, /* set link status */
49905- lmc_hssi_set_crc_length, /* set CRC length */
49906- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49907- lmc_hssi_watchdog
49908+ .init = lmc_hssi_init, /* special media init stuff */
49909+ .defaults = lmc_hssi_default, /* reset to default state */
49910+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49911+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49912+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49913+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49914+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49915+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49916+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49917+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49918+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49919+ .watchdog = lmc_hssi_watchdog
49920 };
49921
49922-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49923- lmc_ssi_default, /* reset to default state */
49924- lmc_ssi_set_status, /* reset status to state provided */
49925- lmc_ssi_set_clock, /* set clock source */
49926- lmc_ssi_set_speed, /* set line speed */
49927- lmc_dummy_set_1, /* set cable length */
49928- lmc_dummy_set_1, /* set scrambler */
49929- lmc_ssi_get_link_status, /* get link status */
49930- lmc_ssi_set_link_status, /* set link status */
49931- lmc_ssi_set_crc_length, /* set CRC length */
49932- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49933- lmc_ssi_watchdog
49934+lmc_media_t lmc_ssi_media = {
49935+ .init = lmc_ssi_init, /* special media init stuff */
49936+ .defaults = lmc_ssi_default, /* reset to default state */
49937+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49938+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49939+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49940+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49941+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49942+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49943+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49944+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49945+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49946+ .watchdog = lmc_ssi_watchdog
49947 };
49948
49949 lmc_media_t lmc_t1_media = {
49950- lmc_t1_init, /* special media init stuff */
49951- lmc_t1_default, /* reset to default state */
49952- lmc_t1_set_status, /* reset status to state provided */
49953- lmc_t1_set_clock, /* set clock source */
49954- lmc_dummy_set2_1, /* set line speed */
49955- lmc_dummy_set_1, /* set cable length */
49956- lmc_dummy_set_1, /* set scrambler */
49957- lmc_t1_get_link_status, /* get link status */
49958- lmc_dummy_set_1, /* set link status */
49959- lmc_t1_set_crc_length, /* set CRC length */
49960- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49961- lmc_t1_watchdog
49962+ .init = lmc_t1_init, /* special media init stuff */
49963+ .defaults = lmc_t1_default, /* reset to default state */
49964+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49965+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49966+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49967+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49968+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49969+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49970+ .set_link_status = lmc_dummy_set_1, /* set link status */
49971+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49972+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49973+ .watchdog = lmc_t1_watchdog
49974 };
49975
49976 static void
49977diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49978index feacc3b..5bac0de 100644
49979--- a/drivers/net/wan/z85230.c
49980+++ b/drivers/net/wan/z85230.c
49981@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49982
49983 struct z8530_irqhandler z8530_sync =
49984 {
49985- z8530_rx,
49986- z8530_tx,
49987- z8530_status
49988+ .rx = z8530_rx,
49989+ .tx = z8530_tx,
49990+ .status = z8530_status
49991 };
49992
49993 EXPORT_SYMBOL(z8530_sync);
49994@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49995 }
49996
49997 static struct z8530_irqhandler z8530_dma_sync = {
49998- z8530_dma_rx,
49999- z8530_dma_tx,
50000- z8530_dma_status
50001+ .rx = z8530_dma_rx,
50002+ .tx = z8530_dma_tx,
50003+ .status = z8530_dma_status
50004 };
50005
50006 static struct z8530_irqhandler z8530_txdma_sync = {
50007- z8530_rx,
50008- z8530_dma_tx,
50009- z8530_dma_status
50010+ .rx = z8530_rx,
50011+ .tx = z8530_dma_tx,
50012+ .status = z8530_dma_status
50013 };
50014
50015 /**
50016@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
50017
50018 struct z8530_irqhandler z8530_nop=
50019 {
50020- z8530_rx_clear,
50021- z8530_tx_clear,
50022- z8530_status_clear
50023+ .rx = z8530_rx_clear,
50024+ .tx = z8530_tx_clear,
50025+ .status = z8530_status_clear
50026 };
50027
50028
50029diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
50030index 0b60295..b8bfa5b 100644
50031--- a/drivers/net/wimax/i2400m/rx.c
50032+++ b/drivers/net/wimax/i2400m/rx.c
50033@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
50034 if (i2400m->rx_roq == NULL)
50035 goto error_roq_alloc;
50036
50037- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
50038+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
50039 GFP_KERNEL);
50040 if (rd == NULL) {
50041 result = -ENOMEM;
50042diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
50043index 64747d4..17c4cf3 100644
50044--- a/drivers/net/wireless/airo.c
50045+++ b/drivers/net/wireless/airo.c
50046@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
50047 struct airo_info *ai = dev->ml_priv;
50048 int ridcode;
50049 int enabled;
50050- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
50051+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
50052 unsigned char *iobuf;
50053
50054 /* Only super-user can write RIDs */
50055diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
50056index d48776e..373d049 100644
50057--- a/drivers/net/wireless/at76c50x-usb.c
50058+++ b/drivers/net/wireless/at76c50x-usb.c
50059@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
50060 }
50061
50062 /* Convert timeout from the DFU status to jiffies */
50063-static inline unsigned long at76_get_timeout(struct dfu_status *s)
50064+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
50065 {
50066 return msecs_to_jiffies((s->poll_timeout[2] << 16)
50067 | (s->poll_timeout[1] << 8)
50068diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
50069index e493db4..2c1853a 100644
50070--- a/drivers/net/wireless/ath/ath10k/htc.c
50071+++ b/drivers/net/wireless/ath/ath10k/htc.c
50072@@ -840,7 +840,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
50073 /* registered target arrival callback from the HIF layer */
50074 int ath10k_htc_init(struct ath10k *ar)
50075 {
50076- struct ath10k_hif_cb htc_callbacks;
50077+ static struct ath10k_hif_cb htc_callbacks = {
50078+ .rx_completion = ath10k_htc_rx_completion_handler,
50079+ .tx_completion = ath10k_htc_tx_completion_handler,
50080+ };
50081 struct ath10k_htc_ep *ep = NULL;
50082 struct ath10k_htc *htc = &ar->htc;
50083
50084@@ -850,8 +853,6 @@ int ath10k_htc_init(struct ath10k *ar)
50085 ath10k_htc_reset_endpoint_states(htc);
50086
50087 /* setup HIF layer callbacks */
50088- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
50089- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
50090 htc->ar = ar;
50091
50092 /* Get HIF default pipe for HTC message exchange */
50093diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
50094index 4716d33..a688310 100644
50095--- a/drivers/net/wireless/ath/ath10k/htc.h
50096+++ b/drivers/net/wireless/ath/ath10k/htc.h
50097@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
50098
50099 struct ath10k_htc_ops {
50100 void (*target_send_suspend_complete)(struct ath10k *ar);
50101-};
50102+} __no_const;
50103
50104 struct ath10k_htc_ep_ops {
50105 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
50106 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
50107 void (*ep_tx_credits)(struct ath10k *);
50108-};
50109+} __no_const;
50110
50111 /* service connection information */
50112 struct ath10k_htc_svc_conn_req {
50113diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50114index 741b38d..b7ae41b 100644
50115--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50116+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50117@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50118 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
50119 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
50120
50121- ACCESS_ONCE(ads->ds_link) = i->link;
50122- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
50123+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
50124+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
50125
50126 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
50127 ctl6 = SM(i->keytype, AR_EncrType);
50128@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50129
50130 if ((i->is_first || i->is_last) &&
50131 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50132- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50133+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50134 | set11nTries(i->rates, 1)
50135 | set11nTries(i->rates, 2)
50136 | set11nTries(i->rates, 3)
50137 | (i->dur_update ? AR_DurUpdateEna : 0)
50138 | SM(0, AR_BurstDur);
50139
50140- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50141+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50142 | set11nRate(i->rates, 1)
50143 | set11nRate(i->rates, 2)
50144 | set11nRate(i->rates, 3);
50145 } else {
50146- ACCESS_ONCE(ads->ds_ctl2) = 0;
50147- ACCESS_ONCE(ads->ds_ctl3) = 0;
50148+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50149+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50150 }
50151
50152 if (!i->is_first) {
50153- ACCESS_ONCE(ads->ds_ctl0) = 0;
50154- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50155- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50156+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50157+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50158+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50159 return;
50160 }
50161
50162@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50163 break;
50164 }
50165
50166- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50167+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50168 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50169 | SM(i->txpower, AR_XmitPower)
50170 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50171@@ -289,19 +289,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50172 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50173 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50174
50175- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50176- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50177+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50178+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50179
50180 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50181 return;
50182
50183- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50184+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50185 | set11nPktDurRTSCTS(i->rates, 1);
50186
50187- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50188+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50189 | set11nPktDurRTSCTS(i->rates, 3);
50190
50191- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50192+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50193 | set11nRateFlags(i->rates, 1)
50194 | set11nRateFlags(i->rates, 2)
50195 | set11nRateFlags(i->rates, 3)
50196diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50197index 729ffbf..49f50e3 100644
50198--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50199+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50200@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50201 (i->qcu << AR_TxQcuNum_S) | desc_len;
50202
50203 checksum += val;
50204- ACCESS_ONCE(ads->info) = val;
50205+ ACCESS_ONCE_RW(ads->info) = val;
50206
50207 checksum += i->link;
50208- ACCESS_ONCE(ads->link) = i->link;
50209+ ACCESS_ONCE_RW(ads->link) = i->link;
50210
50211 checksum += i->buf_addr[0];
50212- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50213+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50214 checksum += i->buf_addr[1];
50215- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50216+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50217 checksum += i->buf_addr[2];
50218- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50219+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50220 checksum += i->buf_addr[3];
50221- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50222+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50223
50224 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50225- ACCESS_ONCE(ads->ctl3) = val;
50226+ ACCESS_ONCE_RW(ads->ctl3) = val;
50227 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50228- ACCESS_ONCE(ads->ctl5) = val;
50229+ ACCESS_ONCE_RW(ads->ctl5) = val;
50230 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50231- ACCESS_ONCE(ads->ctl7) = val;
50232+ ACCESS_ONCE_RW(ads->ctl7) = val;
50233 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50234- ACCESS_ONCE(ads->ctl9) = val;
50235+ ACCESS_ONCE_RW(ads->ctl9) = val;
50236
50237 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50238- ACCESS_ONCE(ads->ctl10) = checksum;
50239+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
50240
50241 if (i->is_first || i->is_last) {
50242- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50243+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50244 | set11nTries(i->rates, 1)
50245 | set11nTries(i->rates, 2)
50246 | set11nTries(i->rates, 3)
50247 | (i->dur_update ? AR_DurUpdateEna : 0)
50248 | SM(0, AR_BurstDur);
50249
50250- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50251+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50252 | set11nRate(i->rates, 1)
50253 | set11nRate(i->rates, 2)
50254 | set11nRate(i->rates, 3);
50255 } else {
50256- ACCESS_ONCE(ads->ctl13) = 0;
50257- ACCESS_ONCE(ads->ctl14) = 0;
50258+ ACCESS_ONCE_RW(ads->ctl13) = 0;
50259+ ACCESS_ONCE_RW(ads->ctl14) = 0;
50260 }
50261
50262 ads->ctl20 = 0;
50263@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50264
50265 ctl17 = SM(i->keytype, AR_EncrType);
50266 if (!i->is_first) {
50267- ACCESS_ONCE(ads->ctl11) = 0;
50268- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50269- ACCESS_ONCE(ads->ctl15) = 0;
50270- ACCESS_ONCE(ads->ctl16) = 0;
50271- ACCESS_ONCE(ads->ctl17) = ctl17;
50272- ACCESS_ONCE(ads->ctl18) = 0;
50273- ACCESS_ONCE(ads->ctl19) = 0;
50274+ ACCESS_ONCE_RW(ads->ctl11) = 0;
50275+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50276+ ACCESS_ONCE_RW(ads->ctl15) = 0;
50277+ ACCESS_ONCE_RW(ads->ctl16) = 0;
50278+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50279+ ACCESS_ONCE_RW(ads->ctl18) = 0;
50280+ ACCESS_ONCE_RW(ads->ctl19) = 0;
50281 return;
50282 }
50283
50284- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50285+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50286 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50287 | SM(i->txpower, AR_XmitPower)
50288 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50289@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50290 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50291 ctl12 |= SM(val, AR_PAPRDChainMask);
50292
50293- ACCESS_ONCE(ads->ctl12) = ctl12;
50294- ACCESS_ONCE(ads->ctl17) = ctl17;
50295+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50296+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50297
50298- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50299+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50300 | set11nPktDurRTSCTS(i->rates, 1);
50301
50302- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50303+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50304 | set11nPktDurRTSCTS(i->rates, 3);
50305
50306- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50307+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50308 | set11nRateFlags(i->rates, 1)
50309 | set11nRateFlags(i->rates, 2)
50310 | set11nRateFlags(i->rates, 3)
50311 | SM(i->rtscts_rate, AR_RTSCTSRate);
50312
50313- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50314+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50315 }
50316
50317 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50318diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50319index 0acd4b5..0591c91 100644
50320--- a/drivers/net/wireless/ath/ath9k/hw.h
50321+++ b/drivers/net/wireless/ath/ath9k/hw.h
50322@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
50323
50324 /* ANI */
50325 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50326-};
50327+} __no_const;
50328
50329 /**
50330 * struct ath_spec_scan - parameters for Atheros spectral scan
50331@@ -706,7 +706,7 @@ struct ath_hw_ops {
50332 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50333 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50334 #endif
50335-};
50336+} __no_const;
50337
50338 struct ath_nf_limits {
50339 s16 max;
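
The two __no_const hunks above feed grsecurity's constify gcc plugin: structures composed purely of function pointers are made const by default, and __no_const exempts tables that ath9k legitimately rewrites at runtime (the private/ops hooks patched during chip setup). A minimal conceptual sketch of the idea; the fallback define is hypothetical, only so the fragment compiles without the plugin:

#ifndef __no_const
#define __no_const	/* hypothetical no-op stand-in without the plugin */
#endif

/* An all-function-pointer struct: the constify plugin would force it into
 * read-only memory.  __no_const keeps it writable because the driver fills
 * these hooks in at probe time. */
struct example_hw_ops {
	void (*ani_cache_ini_regs)(void *ah);
} __no_const;
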
50340diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50341index 92190da..f3a4c4c 100644
50342--- a/drivers/net/wireless/b43/phy_lp.c
50343+++ b/drivers/net/wireless/b43/phy_lp.c
50344@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50345 {
50346 struct ssb_bus *bus = dev->dev->sdev->bus;
50347
50348- static const struct b206x_channel *chandata = NULL;
50349+ const struct b206x_channel *chandata = NULL;
50350 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50351 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50352 u16 old_comm15, scale;
50353diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50354index dc1d20c..f7a4f06 100644
50355--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50356+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50357@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50358 */
50359 if (il3945_mod_params.disable_hw_scan) {
50360 D_INFO("Disabling hw_scan\n");
50361- il3945_mac_ops.hw_scan = NULL;
50362+ pax_open_kernel();
50363+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50364+ pax_close_kernel();
50365 }
50366
50367 D_INFO("*** LOAD DRIVER ***\n");
50368diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50369index 0ffb6ff..c0b7f0e 100644
50370--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50371+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50372@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50373 {
50374 struct iwl_priv *priv = file->private_data;
50375 char buf[64];
50376- int buf_size;
50377+ size_t buf_size;
50378 u32 offset, len;
50379
50380 memset(buf, 0, sizeof(buf));
50381@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50382 struct iwl_priv *priv = file->private_data;
50383
50384 char buf[8];
50385- int buf_size;
50386+ size_t buf_size;
50387 u32 reset_flag;
50388
50389 memset(buf, 0, sizeof(buf));
50390@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50391 {
50392 struct iwl_priv *priv = file->private_data;
50393 char buf[8];
50394- int buf_size;
50395+ size_t buf_size;
50396 int ht40;
50397
50398 memset(buf, 0, sizeof(buf));
50399@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50400 {
50401 struct iwl_priv *priv = file->private_data;
50402 char buf[8];
50403- int buf_size;
50404+ size_t buf_size;
50405 int value;
50406
50407 memset(buf, 0, sizeof(buf));
50408@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50409 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50410 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50411
50412-static const char *fmt_value = " %-30s %10u\n";
50413-static const char *fmt_hex = " %-30s 0x%02X\n";
50414-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50415-static const char *fmt_header =
50416+static const char fmt_value[] = " %-30s %10u\n";
50417+static const char fmt_hex[] = " %-30s 0x%02X\n";
50418+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50419+static const char fmt_header[] =
50420 "%-32s current cumulative delta max\n";
50421
50422 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50423@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50424 {
50425 struct iwl_priv *priv = file->private_data;
50426 char buf[8];
50427- int buf_size;
50428+ size_t buf_size;
50429 int clear;
50430
50431 memset(buf, 0, sizeof(buf));
50432@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50433 {
50434 struct iwl_priv *priv = file->private_data;
50435 char buf[8];
50436- int buf_size;
50437+ size_t buf_size;
50438 int trace;
50439
50440 memset(buf, 0, sizeof(buf));
50441@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50442 {
50443 struct iwl_priv *priv = file->private_data;
50444 char buf[8];
50445- int buf_size;
50446+ size_t buf_size;
50447 int missed;
50448
50449 memset(buf, 0, sizeof(buf));
50450@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50451
50452 struct iwl_priv *priv = file->private_data;
50453 char buf[8];
50454- int buf_size;
50455+ size_t buf_size;
50456 int plcp;
50457
50458 memset(buf, 0, sizeof(buf));
50459@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50460
50461 struct iwl_priv *priv = file->private_data;
50462 char buf[8];
50463- int buf_size;
50464+ size_t buf_size;
50465 int flush;
50466
50467 memset(buf, 0, sizeof(buf));
50468@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50469
50470 struct iwl_priv *priv = file->private_data;
50471 char buf[8];
50472- int buf_size;
50473+ size_t buf_size;
50474 int rts;
50475
50476 if (!priv->cfg->ht_params)
50477@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50478 {
50479 struct iwl_priv *priv = file->private_data;
50480 char buf[8];
50481- int buf_size;
50482+ size_t buf_size;
50483
50484 memset(buf, 0, sizeof(buf));
50485 buf_size = min(count, sizeof(buf) - 1);
50486@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50487 struct iwl_priv *priv = file->private_data;
50488 u32 event_log_flag;
50489 char buf[8];
50490- int buf_size;
50491+ size_t buf_size;
50492
50493 /* check that the interface is up */
50494 if (!iwl_is_ready(priv))
50495@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50496 struct iwl_priv *priv = file->private_data;
50497 char buf[8];
50498 u32 calib_disabled;
50499- int buf_size;
50500+ size_t buf_size;
50501
50502 memset(buf, 0, sizeof(buf));
50503 buf_size = min(count, sizeof(buf) - 1);
50504diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50505index 788085b..0bc852a 100644
50506--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50507+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50508@@ -1598,7 +1598,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50509 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50510
50511 char buf[8];
50512- int buf_size;
50513+ size_t buf_size;
50514 u32 reset_flag;
50515
50516 memset(buf, 0, sizeof(buf));
50517@@ -1619,7 +1619,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50518 {
50519 struct iwl_trans *trans = file->private_data;
50520 char buf[8];
50521- int buf_size;
50522+ size_t buf_size;
50523 int csr;
50524
50525 memset(buf, 0, sizeof(buf));
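
Every buf_size hunk above (dvm/debugfs.c and pcie/trans.c) makes the same change: count arrives as a size_t, and keeping the min() result in a size_t avoids the implicit narrowing into int that PaX's size_overflow plugin flags. Here buf_size is already capped at sizeof(buf) - 1, so this is defensive typing rather than a live bug; a small userspace sketch (64-bit build assumed) of the hazard the plugin hunts:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t count = 0x100000000UL;	/* pathological 4 GiB request */
	int truncated = count;		/* keeps low 32 bits only: 0 */
	size_t kept = count;		/* value preserved */
	printf("int: %d  size_t: %zu\n", truncated, kept);
	return 0;
}
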
50526diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50527index a312c65..162b13a 100644
50528--- a/drivers/net/wireless/mac80211_hwsim.c
50529+++ b/drivers/net/wireless/mac80211_hwsim.c
50530@@ -2573,20 +2573,20 @@ static int __init init_mac80211_hwsim(void)
50531 if (channels < 1)
50532 return -EINVAL;
50533
50534- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50535- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50536- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50537- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50538- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50539- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50540- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50541- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50542- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50543- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50544- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50545- mac80211_hwsim_assign_vif_chanctx;
50546- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50547- mac80211_hwsim_unassign_vif_chanctx;
50548+ pax_open_kernel();
50549+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50550+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50551+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50552+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50553+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50554+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50555+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50556+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50557+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50558+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50559+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50560+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50561+ pax_close_kernel();
50562
50563 spin_lock_init(&hwsim_radio_lock);
50564 INIT_LIST_HEAD(&hwsim_radios);
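
The hwsim rewrite shows grsecurity's standard recipe for ops structures that the constify plugin has moved into read-only memory: bracket the stores with pax_open_kernel()/pax_close_kernel() (arch-specific; on x86 with KERNEXEC it briefly lifts kernel write protection) and cast through *(void **)& to strip the const qualifier. A conceptual sketch of the idiom, not the real PaX implementation:

/* example_ops lives in protected memory once the constify plugin runs. */
static struct example_ops {
	void (*hw_scan)(void);
} example_ops;

static void install_hw_scan(void (*fn)(void))
{
	pax_open_kernel();			/* make protected data writable */
	*(void **)&example_ops.hw_scan = fn;	/* const cast for the store */
	pax_close_kernel();			/* re-arm write protection */
}
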
50565diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50566index d2a9a08..0cb175d 100644
50567--- a/drivers/net/wireless/rndis_wlan.c
50568+++ b/drivers/net/wireless/rndis_wlan.c
50569@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50570
50571 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50572
50573- if (rts_threshold < 0 || rts_threshold > 2347)
50574+ if (rts_threshold > 2347)
50575 rts_threshold = 2347;
50576
50577 tmp = cpu_to_le32(rts_threshold);
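
rts_threshold is a u32, so the dropped rts_threshold < 0 test was dead code: an unsigned value is never negative, and gcc's -Wtype-limits warns about the comparison. Only the upper clamp ever did anything, as a quick demonstration shows:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rts_threshold = (uint32_t)-1;	/* "negative" input wraps */
	if (rts_threshold < 0)			/* always false for u32 */
		puts("unreachable");
	if (rts_threshold > 2347)
		rts_threshold = 2347;		/* the clamp still works */
	printf("%u\n", rts_threshold);		/* prints 2347 */
	return 0;
}
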
50578diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50579index d13f25c..2573994 100644
50580--- a/drivers/net/wireless/rt2x00/rt2x00.h
50581+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50582@@ -375,7 +375,7 @@ struct rt2x00_intf {
50583 * for hardware which doesn't support hardware
50584 * sequence counting.
50585 */
50586- atomic_t seqno;
50587+ atomic_unchecked_t seqno;
50588 };
50589
50590 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50591diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50592index 5642ccc..01f03eb 100644
50593--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50594+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50595@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50596 * sequence counter given by mac80211.
50597 */
50598 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50599- seqno = atomic_add_return(0x10, &intf->seqno);
50600+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50601 else
50602- seqno = atomic_read(&intf->seqno);
50603+ seqno = atomic_read_unchecked(&intf->seqno);
50604
50605 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50606 hdr->seq_ctrl |= cpu_to_le16(seqno);
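
PAX_REFCOUNT instruments atomic_t so an overflowing reference count traps instead of wrapping into a use-after-free. Counters that are supposed to wrap, like this 802.11 sequence number stepped by 0x10 per frame, are switched to atomic_unchecked_t with _unchecked accessors to opt out. A sketch of the opt-out type along the lines grsecurity defines it (arch details vary):

/* Same layout as atomic_t, but operations on it carry no overflow trap,
 * so deliberately wrapping counters stay exempt from PAX_REFCOUNT. */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}
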
50607diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50608index b661f896..ddf7d2b 100644
50609--- a/drivers/net/wireless/ti/wl1251/sdio.c
50610+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50611@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50612
50613 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50614
50615- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50616- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50617+ pax_open_kernel();
50618+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50619+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50620+ pax_close_kernel();
50621
50622 wl1251_info("using dedicated interrupt line");
50623 } else {
50624- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50625- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50626+ pax_open_kernel();
50627+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50628+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50629+ pax_close_kernel();
50630
50631 wl1251_info("using SDIO interrupt");
50632 }
50633diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50634index d50dfac..0a6f5be3 100644
50635--- a/drivers/net/wireless/ti/wl12xx/main.c
50636+++ b/drivers/net/wireless/ti/wl12xx/main.c
50637@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50638 sizeof(wl->conf.mem));
50639
50640 /* read data preparation is only needed by wl127x */
50641- wl->ops->prepare_read = wl127x_prepare_read;
50642+ pax_open_kernel();
50643+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50644+ pax_close_kernel();
50645
50646 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50647 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50648@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50649 sizeof(wl->conf.mem));
50650
50651 /* read data preparation is only needed by wl127x */
50652- wl->ops->prepare_read = wl127x_prepare_read;
50653+ pax_open_kernel();
50654+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50655+ pax_close_kernel();
50656
50657 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50658 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50659diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50660index de5b4fa..7996ec6 100644
50661--- a/drivers/net/wireless/ti/wl18xx/main.c
50662+++ b/drivers/net/wireless/ti/wl18xx/main.c
50663@@ -1900,8 +1900,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50664 }
50665
50666 if (!checksum_param) {
50667- wl18xx_ops.set_rx_csum = NULL;
50668- wl18xx_ops.init_vif = NULL;
50669+ pax_open_kernel();
50670+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
50671+ *(void **)&wl18xx_ops.init_vif = NULL;
50672+ pax_close_kernel();
50673 }
50674
50675 /* Enable 11a Band only if we have 5G antennas */
50676diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50677index a912dc0..a8225ba 100644
50678--- a/drivers/net/wireless/zd1211rw/zd_usb.c
50679+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50680@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50681 {
50682 struct zd_usb *usb = urb->context;
50683 struct zd_usb_interrupt *intr = &usb->intr;
50684- int len;
50685+ unsigned int len;
50686 u16 int_num;
50687
50688 ZD_ASSERT(in_interrupt());
50689diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50690index 683671a..4519fc2 100644
50691--- a/drivers/nfc/nfcwilink.c
50692+++ b/drivers/nfc/nfcwilink.c
50693@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50694
50695 static int nfcwilink_probe(struct platform_device *pdev)
50696 {
50697- static struct nfcwilink *drv;
50698+ struct nfcwilink *drv;
50699 int rc;
50700 __u32 protocols;
50701
50702diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50703index d93b2b6..ae50401 100644
50704--- a/drivers/oprofile/buffer_sync.c
50705+++ b/drivers/oprofile/buffer_sync.c
50706@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50707 if (cookie == NO_COOKIE)
50708 offset = pc;
50709 if (cookie == INVALID_COOKIE) {
50710- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50711+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50712 offset = pc;
50713 }
50714 if (cookie != last_cookie) {
50715@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50716 /* add userspace sample */
50717
50718 if (!mm) {
50719- atomic_inc(&oprofile_stats.sample_lost_no_mm);
50720+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50721 return 0;
50722 }
50723
50724 cookie = lookup_dcookie(mm, s->eip, &offset);
50725
50726 if (cookie == INVALID_COOKIE) {
50727- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50728+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50729 return 0;
50730 }
50731
50732@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
50733 /* ignore backtraces if failed to add a sample */
50734 if (state == sb_bt_start) {
50735 state = sb_bt_ignore;
50736- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50737+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50738 }
50739 }
50740 release_mm(mm);
50741diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50742index c0cc4e7..44d4e54 100644
50743--- a/drivers/oprofile/event_buffer.c
50744+++ b/drivers/oprofile/event_buffer.c
50745@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50746 }
50747
50748 if (buffer_pos == buffer_size) {
50749- atomic_inc(&oprofile_stats.event_lost_overflow);
50750+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50751 return;
50752 }
50753
50754diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50755index ed2c3ec..deda85a 100644
50756--- a/drivers/oprofile/oprof.c
50757+++ b/drivers/oprofile/oprof.c
50758@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50759 if (oprofile_ops.switch_events())
50760 return;
50761
50762- atomic_inc(&oprofile_stats.multiplex_counter);
50763+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50764 start_switch_worker();
50765 }
50766
50767diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50768index ee2cfce..7f8f699 100644
50769--- a/drivers/oprofile/oprofile_files.c
50770+++ b/drivers/oprofile/oprofile_files.c
50771@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50772
50773 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50774
50775-static ssize_t timeout_read(struct file *file, char __user *buf,
50776+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50777 size_t count, loff_t *offset)
50778 {
50779 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50780diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50781index 59659ce..6c860a0 100644
50782--- a/drivers/oprofile/oprofile_stats.c
50783+++ b/drivers/oprofile/oprofile_stats.c
50784@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50785 cpu_buf->sample_invalid_eip = 0;
50786 }
50787
50788- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50789- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50790- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50791- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50792- atomic_set(&oprofile_stats.multiplex_counter, 0);
50793+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50794+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50795+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50796+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50797+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50798 }
50799
50800
50801diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50802index 1fc622b..8c48fc3 100644
50803--- a/drivers/oprofile/oprofile_stats.h
50804+++ b/drivers/oprofile/oprofile_stats.h
50805@@ -13,11 +13,11 @@
50806 #include <linux/atomic.h>
50807
50808 struct oprofile_stat_struct {
50809- atomic_t sample_lost_no_mm;
50810- atomic_t sample_lost_no_mapping;
50811- atomic_t bt_lost_no_mapping;
50812- atomic_t event_lost_overflow;
50813- atomic_t multiplex_counter;
50814+ atomic_unchecked_t sample_lost_no_mm;
50815+ atomic_unchecked_t sample_lost_no_mapping;
50816+ atomic_unchecked_t bt_lost_no_mapping;
50817+ atomic_unchecked_t event_lost_overflow;
50818+ atomic_unchecked_t multiplex_counter;
50819 };
50820
50821 extern struct oprofile_stat_struct oprofile_stats;
50822diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50823index 3f49345..c750d0b 100644
50824--- a/drivers/oprofile/oprofilefs.c
50825+++ b/drivers/oprofile/oprofilefs.c
50826@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50827
50828 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50829 {
50830- atomic_t *val = file->private_data;
50831- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50832+ atomic_unchecked_t *val = file->private_data;
50833+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50834 }
50835
50836
50837@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50838
50839
50840 int oprofilefs_create_ro_atomic(struct dentry *root,
50841- char const *name, atomic_t *val)
50842+ char const *name, atomic_unchecked_t *val)
50843 {
50844 return __oprofilefs_create_file(root, name,
50845 &atomic_ro_fops, 0444, val);
50846diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50847index 61be1d9..dec05d7 100644
50848--- a/drivers/oprofile/timer_int.c
50849+++ b/drivers/oprofile/timer_int.c
50850@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50851 return NOTIFY_OK;
50852 }
50853
50854-static struct notifier_block __refdata oprofile_cpu_notifier = {
50855+static struct notifier_block oprofile_cpu_notifier = {
50856 .notifier_call = oprofile_cpu_notify,
50857 };
50858
50859diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50860index 3b47080..6cd05dd 100644
50861--- a/drivers/parport/procfs.c
50862+++ b/drivers/parport/procfs.c
50863@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50864
50865 *ppos += len;
50866
50867- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50868+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50869 }
50870
50871 #ifdef CONFIG_PARPORT_1284
50872@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50873
50874 *ppos += len;
50875
50876- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50877+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50878 }
50879 #endif /* IEEE1284.3 support. */
50880
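Both parport hunks add the same fail-closed guard: if a miscomputed len ever exceeded the on-stack buffer, copy_to_user() would leak adjacent kernel stack to userspace, so the length is checked first and the call errors out with -EFAULT instead. A kernel-style sketch of the shape (names hypothetical):

static int report_string(char __user *result, const char *buffer,
			 size_t buf_sz, size_t len)
{
	if (len > buf_sz)	/* would over-read the stack buffer */
		return -EFAULT;
	return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
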
50881diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50882index 8dcccff..35d701d 100644
50883--- a/drivers/pci/hotplug/acpiphp_ibm.c
50884+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50885@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50886 goto init_cleanup;
50887 }
50888
50889- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50890+ pax_open_kernel();
50891+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50892+ pax_close_kernel();
50893 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50894
50895 return retval;
50896diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50897index 04fcd78..39e83f1 100644
50898--- a/drivers/pci/hotplug/cpcihp_generic.c
50899+++ b/drivers/pci/hotplug/cpcihp_generic.c
50900@@ -73,7 +73,6 @@ static u16 port;
50901 static unsigned int enum_bit;
50902 static u8 enum_mask;
50903
50904-static struct cpci_hp_controller_ops generic_hpc_ops;
50905 static struct cpci_hp_controller generic_hpc;
50906
50907 static int __init validate_parameters(void)
50908@@ -139,6 +138,10 @@ static int query_enum(void)
50909 return ((value & enum_mask) == enum_mask);
50910 }
50911
50912+static struct cpci_hp_controller_ops generic_hpc_ops = {
50913+ .query_enum = query_enum,
50914+};
50915+
50916 static int __init cpcihp_generic_init(void)
50917 {
50918 int status;
50919@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50920 pci_dev_put(dev);
50921
50922 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50923- generic_hpc_ops.query_enum = query_enum;
50924 generic_hpc.ops = &generic_hpc_ops;
50925
50926 status = cpci_hp_register_controller(&generic_hpc);
50927diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50928index 6757b3e..d3bad62 100644
50929--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50930+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50931@@ -59,7 +59,6 @@
50932 /* local variables */
50933 static bool debug;
50934 static bool poll;
50935-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50936 static struct cpci_hp_controller zt5550_hpc;
50937
50938 /* Primary cPCI bus bridge device */
50939@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
50940 return 0;
50941 }
50942
50943+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50944+ .query_enum = zt5550_hc_query_enum,
50945+};
50946+
50947 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50948 {
50949 int status;
50950@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
50951 dbg("returned from zt5550_hc_config");
50952
50953 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
50954- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
50955 zt5550_hpc.ops = &zt5550_hpc_ops;
50956 if(!poll) {
50957 zt5550_hpc.irq = hc_dev->irq;
50958 zt5550_hpc.irq_flags = IRQF_SHARED;
50959 zt5550_hpc.dev_id = hc_dev;
50960
50961- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50962- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50963- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50964+ pax_open_kernel();
50965+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50966+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50967+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50968+ pax_close_kernel();

50969 } else {
50970 info("using ENUM# polling mode");
50971 }
50972diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
50973index 0968a9b..5a00edf 100644
50974--- a/drivers/pci/hotplug/cpqphp_nvram.c
50975+++ b/drivers/pci/hotplug/cpqphp_nvram.c
50976@@ -427,9 +427,13 @@ static u32 store_HRT (void __iomem *rom_start)
50977
50978 void compaq_nvram_init (void __iomem *rom_start)
50979 {
50980+
50981+#ifndef CONFIG_PAX_KERNEXEC
50982 if (rom_start) {
50983 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
50984 }
50985+#endif
50986+
50987 dbg("int15 entry = %p\n", compaq_int15_entry_point);
50988
50989 /* initialize our int15 lock */
50990diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
50991index 56d8486..f26113f 100644
50992--- a/drivers/pci/hotplug/pci_hotplug_core.c
50993+++ b/drivers/pci/hotplug/pci_hotplug_core.c
50994@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
50995 return -EINVAL;
50996 }
50997
50998- slot->ops->owner = owner;
50999- slot->ops->mod_name = mod_name;
51000+ pax_open_kernel();
51001+ *(struct module **)&slot->ops->owner = owner;
51002+ *(const char **)&slot->ops->mod_name = mod_name;
51003+ pax_close_kernel();
51004
51005 mutex_lock(&pci_hp_mutex);
51006 /*
51007diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
51008index a2297db..7c7d161 100644
51009--- a/drivers/pci/hotplug/pciehp_core.c
51010+++ b/drivers/pci/hotplug/pciehp_core.c
51011@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51012 struct slot *slot = ctrl->slot;
51013 struct hotplug_slot *hotplug = NULL;
51014 struct hotplug_slot_info *info = NULL;
51015- struct hotplug_slot_ops *ops = NULL;
51016+ hotplug_slot_ops_no_const *ops = NULL;
51017 char name[SLOT_NAME_SIZE];
51018 int retval = -ENOMEM;
51019
51020diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51021index 13f3d30..363cb44 100644
51022--- a/drivers/pci/msi.c
51023+++ b/drivers/pci/msi.c
51024@@ -523,8 +523,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51025 {
51026 struct attribute **msi_attrs;
51027 struct attribute *msi_attr;
51028- struct device_attribute *msi_dev_attr;
51029- struct attribute_group *msi_irq_group;
51030+ device_attribute_no_const *msi_dev_attr;
51031+ attribute_group_no_const *msi_irq_group;
51032 const struct attribute_group **msi_irq_groups;
51033 struct msi_desc *entry;
51034 int ret = -ENOMEM;
51035@@ -584,7 +584,7 @@ error_attrs:
51036 count = 0;
51037 msi_attr = msi_attrs[count];
51038 while (msi_attr) {
51039- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51040+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51041 kfree(msi_attr->name);
51042 kfree(msi_dev_attr);
51043 ++count;
51044diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51045index 9ff0a90..e819dda 100644
51046--- a/drivers/pci/pci-sysfs.c
51047+++ b/drivers/pci/pci-sysfs.c
51048@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51049 {
51050 /* allocate attribute structure, piggyback attribute name */
51051 int name_len = write_combine ? 13 : 10;
51052- struct bin_attribute *res_attr;
51053+ bin_attribute_no_const *res_attr;
51054 int retval;
51055
51056 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51057@@ -1311,7 +1311,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51058 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51059 {
51060 int retval;
51061- struct bin_attribute *attr;
51062+ bin_attribute_no_const *attr;
51063
51064 /* If the device has VPD, try to expose it in sysfs. */
51065 if (dev->vpd) {
51066@@ -1358,7 +1358,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51067 {
51068 int retval;
51069 int rom_size = 0;
51070- struct bin_attribute *attr;
51071+ bin_attribute_no_const *attr;
51072
51073 if (!sysfs_initialized)
51074 return -EACCES;
51075diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51076index 0601890..dc15007 100644
51077--- a/drivers/pci/pci.h
51078+++ b/drivers/pci/pci.h
51079@@ -91,7 +91,7 @@ struct pci_vpd_ops {
51080 struct pci_vpd {
51081 unsigned int len;
51082 const struct pci_vpd_ops *ops;
51083- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51084+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51085 };
51086
51087 int pci_vpd_pci22_init(struct pci_dev *dev);
51088diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51089index e1e7026..d28dd33 100644
51090--- a/drivers/pci/pcie/aspm.c
51091+++ b/drivers/pci/pcie/aspm.c
51092@@ -27,9 +27,9 @@
51093 #define MODULE_PARAM_PREFIX "pcie_aspm."
51094
51095 /* Note: those are not register definitions */
51096-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51097-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51098-#define ASPM_STATE_L1 (4) /* L1 state */
51099+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51100+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51101+#define ASPM_STATE_L1 (4U) /* L1 state */
51102 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51103 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51104
51105diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51106index e3cf8a2..be1baf0 100644
51107--- a/drivers/pci/probe.c
51108+++ b/drivers/pci/probe.c
51109@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51110 struct pci_bus_region region, inverted_region;
51111 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
51112
51113- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51114+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51115
51116 /* No printks while decoding is disabled! */
51117 if (!dev->mmio_always_on) {
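
PCI_ROM_ADDRESS_MASK is an unsigned long constant (defined as ~0x7ffUL in pci_regs.h, if memory serves), so storing it in the u32 mask narrows it; the explicit (u32) cast marks the truncation as intentional for the size_overflow plugin. Assuming that definition, a 64-bit sketch:

#include <stdio.h>
#include <stdint.h>

#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)	/* assumed, per pci_regs.h */

int main(void)
{
	/* Explicit narrowing: same low 32 bits, but the cast tells readers
	 * (and the plugin) that the truncation is deliberate. */
	uint32_t mask = (uint32_t)PCI_ROM_ADDRESS_MASK;
	printf("0x%08x\n", mask);	/* prints 0xfffff800 */
	return 0;
}
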
51118diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51119index 3f155e7..0f4b1f0 100644
51120--- a/drivers/pci/proc.c
51121+++ b/drivers/pci/proc.c
51122@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51123 static int __init pci_proc_init(void)
51124 {
51125 struct pci_dev *dev = NULL;
51126+
51127+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51128+#ifdef CONFIG_GRKERNSEC_PROC_USER
51129+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51130+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51131+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51132+#endif
51133+#else
51134 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51135+#endif
51136 proc_create("devices", 0, proc_bus_pci_dir,
51137 &proc_bus_pci_dev_operations);
51138 proc_initialized = 1;
51139diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
51140index 7f1a2e2..bc4b405 100644
51141--- a/drivers/platform/chrome/chromeos_laptop.c
51142+++ b/drivers/platform/chrome/chromeos_laptop.c
51143@@ -395,7 +395,7 @@ static struct chromeos_laptop cr48 = {
51144 .callback = chromeos_laptop_dmi_matched, \
51145 .driver_data = (void *)&board_
51146
51147-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
51148+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
51149 {
51150 .ident = "Samsung Series 5 550",
51151 .matches = {
51152diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51153index 297b664..ab91e39 100644
51154--- a/drivers/platform/x86/alienware-wmi.c
51155+++ b/drivers/platform/x86/alienware-wmi.c
51156@@ -133,7 +133,7 @@ struct wmax_led_args {
51157 } __packed;
51158
51159 static struct platform_device *platform_device;
51160-static struct device_attribute *zone_dev_attrs;
51161+static device_attribute_no_const *zone_dev_attrs;
51162 static struct attribute **zone_attrs;
51163 static struct platform_zone *zone_data;
51164
51165@@ -144,7 +144,7 @@ static struct platform_driver platform_driver = {
51166 }
51167 };
51168
51169-static struct attribute_group zone_attribute_group = {
51170+static attribute_group_no_const zone_attribute_group = {
51171 .name = "rgb_zones",
51172 };
51173
51174diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51175index 3c6cced..12e0771 100644
51176--- a/drivers/platform/x86/asus-wmi.c
51177+++ b/drivers/platform/x86/asus-wmi.c
51178@@ -1592,6 +1592,10 @@ static int show_dsts(struct seq_file *m, void *data)
51179 int err;
51180 u32 retval = -1;
51181
51182+#ifdef CONFIG_GRKERNSEC_KMEM
51183+ return -EPERM;
51184+#endif
51185+
51186 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51187
51188 if (err < 0)
51189@@ -1608,6 +1612,10 @@ static int show_devs(struct seq_file *m, void *data)
51190 int err;
51191 u32 retval = -1;
51192
51193+#ifdef CONFIG_GRKERNSEC_KMEM
51194+ return -EPERM;
51195+#endif
51196+
51197 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51198 &retval);
51199
51200@@ -1632,6 +1640,10 @@ static int show_call(struct seq_file *m, void *data)
51201 union acpi_object *obj;
51202 acpi_status status;
51203
51204+#ifdef CONFIG_GRKERNSEC_KMEM
51205+ return -EPERM;
51206+#endif
51207+
51208 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51209 1, asus->debug.method_id,
51210 &input, &output);
51211diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51212index 62f8030..c7f2a45 100644
51213--- a/drivers/platform/x86/msi-laptop.c
51214+++ b/drivers/platform/x86/msi-laptop.c
51215@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51216
51217 if (!quirks->ec_read_only) {
51218 /* allow userland write sysfs file */
51219- dev_attr_bluetooth.store = store_bluetooth;
51220- dev_attr_wlan.store = store_wlan;
51221- dev_attr_threeg.store = store_threeg;
51222- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51223- dev_attr_wlan.attr.mode |= S_IWUSR;
51224- dev_attr_threeg.attr.mode |= S_IWUSR;
51225+ pax_open_kernel();
51226+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51227+ *(void **)&dev_attr_wlan.store = store_wlan;
51228+ *(void **)&dev_attr_threeg.store = store_threeg;
51229+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51230+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51231+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51232+ pax_close_kernel();
51233 }
51234
51235 /* disable hardware control by fn key */
51236diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51237index 70222f2..8c8ce66 100644
51238--- a/drivers/platform/x86/msi-wmi.c
51239+++ b/drivers/platform/x86/msi-wmi.c
51240@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51241 static void msi_wmi_notify(u32 value, void *context)
51242 {
51243 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51244- static struct key_entry *key;
51245+ struct key_entry *key;
51246 union acpi_object *obj;
51247 acpi_status status;
51248
51249diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51250index 9c5a074..06c976a 100644
51251--- a/drivers/platform/x86/sony-laptop.c
51252+++ b/drivers/platform/x86/sony-laptop.c
51253@@ -2527,7 +2527,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51254 }
51255
51256 /* High speed charging function */
51257-static struct device_attribute *hsc_handle;
51258+static device_attribute_no_const *hsc_handle;
51259
51260 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51261 struct device_attribute *attr,
51262@@ -2601,7 +2601,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51263 }
51264
51265 /* low battery function */
51266-static struct device_attribute *lowbatt_handle;
51267+static device_attribute_no_const *lowbatt_handle;
51268
51269 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51270 struct device_attribute *attr,
51271@@ -2667,7 +2667,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51272 }
51273
51274 /* fan speed function */
51275-static struct device_attribute *fan_handle, *hsf_handle;
51276+static device_attribute_no_const *fan_handle, *hsf_handle;
51277
51278 static ssize_t sony_nc_hsfan_store(struct device *dev,
51279 struct device_attribute *attr,
51280@@ -2774,7 +2774,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51281 }
51282
51283 /* USB charge function */
51284-static struct device_attribute *uc_handle;
51285+static device_attribute_no_const *uc_handle;
51286
51287 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51288 struct device_attribute *attr,
51289@@ -2848,7 +2848,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51290 }
51291
51292 /* Panel ID function */
51293-static struct device_attribute *panel_handle;
51294+static device_attribute_no_const *panel_handle;
51295
51296 static ssize_t sony_nc_panelid_show(struct device *dev,
51297 struct device_attribute *attr, char *buffer)
51298@@ -2895,7 +2895,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51299 }
51300
51301 /* smart connect function */
51302-static struct device_attribute *sc_handle;
51303+static device_attribute_no_const *sc_handle;
51304
51305 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51306 struct device_attribute *attr,
51307diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51308index d82f196..5458f34 100644
51309--- a/drivers/platform/x86/thinkpad_acpi.c
51310+++ b/drivers/platform/x86/thinkpad_acpi.c
51311@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
51312 return 0;
51313 }
51314
51315-void static hotkey_mask_warn_incomplete_mask(void)
51316+static void hotkey_mask_warn_incomplete_mask(void)
51317 {
51318 /* log only what the user can fix... */
51319 const u32 wantedmask = hotkey_driver_mask &
51320@@ -2438,10 +2438,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51321 && !tp_features.bright_unkfw)
51322 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51323 }
51324+}
51325
51326 #undef TPACPI_COMPARE_KEY
51327 #undef TPACPI_MAY_SEND_KEY
51328-}
51329
51330 /*
51331 * Polling driver
51332diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51333index 438d4c7..ca8a2fb 100644
51334--- a/drivers/pnp/pnpbios/bioscalls.c
51335+++ b/drivers/pnp/pnpbios/bioscalls.c
51336@@ -59,7 +59,7 @@ do { \
51337 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51338 } while(0)
51339
51340-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51341+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51342 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51343
51344 /*
51345@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51346
51347 cpu = get_cpu();
51348 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51349+
51350+ pax_open_kernel();
51351 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51352+ pax_close_kernel();
51353
51354 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51355 spin_lock_irqsave(&pnp_bios_lock, flags);
51356@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51357 :"memory");
51358 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51359
51360+ pax_open_kernel();
51361 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51362+ pax_close_kernel();
51363+
51364 put_cpu();
51365
51366 /* If we get here and this is set then the PnP BIOS faulted on us. */
51367@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51368 return status;
51369 }
51370
51371-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51372+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51373 {
51374 int i;
51375
51376@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51377 pnp_bios_callpoint.offset = header->fields.pm16offset;
51378 pnp_bios_callpoint.segment = PNP_CS16;
51379
51380+ pax_open_kernel();
51381+
51382 for_each_possible_cpu(i) {
51383 struct desc_struct *gdt = get_cpu_gdt_table(i);
51384 if (!gdt)
51385@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51386 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51387 (unsigned long)__va(header->fields.pm16dseg));
51388 }
51389+
51390+ pax_close_kernel();
51391 }
51392diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51393index 0c52e2a..3421ab7 100644
51394--- a/drivers/power/pda_power.c
51395+++ b/drivers/power/pda_power.c
51396@@ -37,7 +37,11 @@ static int polling;
51397
51398 #if IS_ENABLED(CONFIG_USB_PHY)
51399 static struct usb_phy *transceiver;
51400-static struct notifier_block otg_nb;
51401+static int otg_handle_notification(struct notifier_block *nb,
51402+ unsigned long event, void *unused);
51403+static struct notifier_block otg_nb = {
51404+ .notifier_call = otg_handle_notification
51405+};
51406 #endif
51407
51408 static struct regulator *ac_draw;
51409@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51410
51411 #if IS_ENABLED(CONFIG_USB_PHY)
51412 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51413- otg_nb.notifier_call = otg_handle_notification;
51414 ret = usb_register_notifier(transceiver, &otg_nb);
51415 if (ret) {
51416 dev_err(dev, "failure to register otg notifier\n");
51417diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51418index cc439fd..8fa30df 100644
51419--- a/drivers/power/power_supply.h
51420+++ b/drivers/power/power_supply.h
51421@@ -16,12 +16,12 @@ struct power_supply;
51422
51423 #ifdef CONFIG_SYSFS
51424
51425-extern void power_supply_init_attrs(struct device_type *dev_type);
51426+extern void power_supply_init_attrs(void);
51427 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51428
51429 #else
51430
51431-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51432+static inline void power_supply_init_attrs(void) {}
51433 #define power_supply_uevent NULL
51434
51435 #endif /* CONFIG_SYSFS */
51436diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51437index 5a5a24e..f7a3754 100644
51438--- a/drivers/power/power_supply_core.c
51439+++ b/drivers/power/power_supply_core.c
51440@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51441 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51442 EXPORT_SYMBOL_GPL(power_supply_notifier);
51443
51444-static struct device_type power_supply_dev_type;
51445+extern const struct attribute_group *power_supply_attr_groups[];
51446+static struct device_type power_supply_dev_type = {
51447+ .groups = power_supply_attr_groups,
51448+};
51449
51450 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51451 struct power_supply *supply)
51452@@ -639,7 +642,7 @@ static int __init power_supply_class_init(void)
51453 return PTR_ERR(power_supply_class);
51454
51455 power_supply_class->dev_uevent = power_supply_uevent;
51456- power_supply_init_attrs(&power_supply_dev_type);
51457+ power_supply_init_attrs();
51458
51459 return 0;
51460 }
51461diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51462index 44420d1..967126e 100644
51463--- a/drivers/power/power_supply_sysfs.c
51464+++ b/drivers/power/power_supply_sysfs.c
51465@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
51466 .is_visible = power_supply_attr_is_visible,
51467 };
51468
51469-static const struct attribute_group *power_supply_attr_groups[] = {
51470+const struct attribute_group *power_supply_attr_groups[] = {
51471 &power_supply_attr_group,
51472 NULL,
51473 };
51474
51475-void power_supply_init_attrs(struct device_type *dev_type)
51476+void power_supply_init_attrs(void)
51477 {
51478 int i;
51479
51480- dev_type->groups = power_supply_attr_groups;
51481-
51482 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51483 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51484 }
51485diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51486index 84419af..268ede8 100644
51487--- a/drivers/powercap/powercap_sys.c
51488+++ b/drivers/powercap/powercap_sys.c
51489@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51490 struct device_attribute name_attr;
51491 };
51492
51493+static ssize_t show_constraint_name(struct device *dev,
51494+ struct device_attribute *dev_attr,
51495+ char *buf);
51496+
51497 static struct powercap_constraint_attr
51498- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51499+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51500+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51501+ .power_limit_attr = {
51502+ .attr = {
51503+ .name = NULL,
51504+ .mode = S_IWUSR | S_IRUGO
51505+ },
51506+ .show = show_constraint_power_limit_uw,
51507+ .store = store_constraint_power_limit_uw
51508+ },
51509+
51510+ .time_window_attr = {
51511+ .attr = {
51512+ .name = NULL,
51513+ .mode = S_IWUSR | S_IRUGO
51514+ },
51515+ .show = show_constraint_time_window_us,
51516+ .store = store_constraint_time_window_us
51517+ },
51518+
51519+ .max_power_attr = {
51520+ .attr = {
51521+ .name = NULL,
51522+ .mode = S_IRUGO
51523+ },
51524+ .show = show_constraint_max_power_uw,
51525+ .store = NULL
51526+ },
51527+
51528+ .min_power_attr = {
51529+ .attr = {
51530+ .name = NULL,
51531+ .mode = S_IRUGO
51532+ },
51533+ .show = show_constraint_min_power_uw,
51534+ .store = NULL
51535+ },
51536+
51537+ .max_time_window_attr = {
51538+ .attr = {
51539+ .name = NULL,
51540+ .mode = S_IRUGO
51541+ },
51542+ .show = show_constraint_max_time_window_us,
51543+ .store = NULL
51544+ },
51545+
51546+ .min_time_window_attr = {
51547+ .attr = {
51548+ .name = NULL,
51549+ .mode = S_IRUGO
51550+ },
51551+ .show = show_constraint_min_time_window_us,
51552+ .store = NULL
51553+ },
51554+
51555+ .name_attr = {
51556+ .attr = {
51557+ .name = NULL,
51558+ .mode = S_IRUGO
51559+ },
51560+ .show = show_constraint_name,
51561+ .store = NULL
51562+ }
51563+ }
51564+};
51565
51566 /* A list of powercap control_types */
51567 static LIST_HEAD(powercap_cntrl_list);
51568@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51569 }
51570
51571 static int create_constraint_attribute(int id, const char *name,
51572- int mode,
51573- struct device_attribute *dev_attr,
51574- ssize_t (*show)(struct device *,
51575- struct device_attribute *, char *),
51576- ssize_t (*store)(struct device *,
51577- struct device_attribute *,
51578- const char *, size_t)
51579- )
51580+ struct device_attribute *dev_attr)
51581 {
51582+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51583
51584- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51585- id, name);
51586- if (!dev_attr->attr.name)
51587+ if (!name)
51588 return -ENOMEM;
51589- dev_attr->attr.mode = mode;
51590- dev_attr->show = show;
51591- dev_attr->store = store;
51592+
51593+ pax_open_kernel();
51594+ *(const char **)&dev_attr->attr.name = name;
51595+ pax_close_kernel();
51596
51597 return 0;
51598 }
51599@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51600
51601 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51602 ret = create_constraint_attribute(i, "power_limit_uw",
51603- S_IWUSR | S_IRUGO,
51604- &constraint_attrs[i].power_limit_attr,
51605- show_constraint_power_limit_uw,
51606- store_constraint_power_limit_uw);
51607+ &constraint_attrs[i].power_limit_attr);
51608 if (ret)
51609 goto err_alloc;
51610 ret = create_constraint_attribute(i, "time_window_us",
51611- S_IWUSR | S_IRUGO,
51612- &constraint_attrs[i].time_window_attr,
51613- show_constraint_time_window_us,
51614- store_constraint_time_window_us);
51615+ &constraint_attrs[i].time_window_attr);
51616 if (ret)
51617 goto err_alloc;
51618- ret = create_constraint_attribute(i, "name", S_IRUGO,
51619- &constraint_attrs[i].name_attr,
51620- show_constraint_name,
51621- NULL);
51622+ ret = create_constraint_attribute(i, "name",
51623+ &constraint_attrs[i].name_attr);
51624 if (ret)
51625 goto err_alloc;
51626- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51627- &constraint_attrs[i].max_power_attr,
51628- show_constraint_max_power_uw,
51629- NULL);
51630+ ret = create_constraint_attribute(i, "max_power_uw",
51631+ &constraint_attrs[i].max_power_attr);
51632 if (ret)
51633 goto err_alloc;
51634- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51635- &constraint_attrs[i].min_power_attr,
51636- show_constraint_min_power_uw,
51637- NULL);
51638+ ret = create_constraint_attribute(i, "min_power_uw",
51639+ &constraint_attrs[i].min_power_attr);
51640 if (ret)
51641 goto err_alloc;
51642 ret = create_constraint_attribute(i, "max_time_window_us",
51643- S_IRUGO,
51644- &constraint_attrs[i].max_time_window_attr,
51645- show_constraint_max_time_window_us,
51646- NULL);
51647+ &constraint_attrs[i].max_time_window_attr);
51648 if (ret)
51649 goto err_alloc;
51650 ret = create_constraint_attribute(i, "min_time_window_us",
51651- S_IRUGO,
51652- &constraint_attrs[i].min_time_window_attr,
51653- show_constraint_min_time_window_us,
51654- NULL);
51655+ &constraint_attrs[i].min_time_window_attr);
51656 if (ret)
51657 goto err_alloc;
51658
51659@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51660 power_zone->zone_dev_attrs[count++] =
51661 &dev_attr_max_energy_range_uj.attr;
51662 if (power_zone->ops->get_energy_uj) {
51663+ pax_open_kernel();
51664 if (power_zone->ops->reset_energy_uj)
51665- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51666+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51667 else
51668- dev_attr_energy_uj.attr.mode = S_IRUGO;
51669+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51670+ pax_close_kernel();
51671 power_zone->zone_dev_attrs[count++] =
51672 &dev_attr_energy_uj.attr;
51673 }
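
The powercap rewrite is the pattern behind most hunks in this stretch: move everything knowable at build time into designated initializers so the structure can live in constified memory, and route the one genuinely dynamic field (the kasprintf'd attribute name) through a pax_open_kernel() window. A hypothetical miniature, kernel context assumed (ssize_t, pax_open_kernel):

struct attr_example {
	const char *name;
	ssize_t (*show)(char *buf);
};

static ssize_t show_temp(char *buf);	/* forward declaration, as with
					 * show_constraint_name above */

/* Hooks fixed at build time; only .name is filled in later. */
static struct attr_example temp_attr = {
	.name = NULL,
	.show = show_temp,
};

static void attr_set_name(struct attr_example *a, const char *dyn)
{
	pax_open_kernel();		/* write only the dynamic field */
	*(const char **)&a->name = dyn;
	pax_close_kernel();
}

static ssize_t show_temp(char *buf)	/* defined below its users, like
					 * show_constraint_name */
{
	return 0;
}
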
51674diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
51675index ee3de34..bec7285 100644
51676--- a/drivers/ptp/Kconfig
51677+++ b/drivers/ptp/Kconfig
51678@@ -8,7 +8,6 @@ config PTP_1588_CLOCK
51679 tristate "PTP clock support"
51680 depends on NET
51681 select PPS
51682- select NET_PTP_CLASSIFY
51683 help
51684 The IEEE 1588 standard defines a method to precisely
51685 synchronize distributed clocks over Ethernet networks. The
51686diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51687index 9c5d414..c7900ce 100644
51688--- a/drivers/ptp/ptp_private.h
51689+++ b/drivers/ptp/ptp_private.h
51690@@ -51,7 +51,7 @@ struct ptp_clock {
51691 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51692 wait_queue_head_t tsev_wq;
51693 int defunct; /* tells readers to go away when clock is being removed */
51694- struct device_attribute *pin_dev_attr;
51695+ device_attribute_no_const *pin_dev_attr;
51696 struct attribute **pin_attr;
51697 struct attribute_group pin_attr_group;
51698 };
51699diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51700index 302e626..12579af 100644
51701--- a/drivers/ptp/ptp_sysfs.c
51702+++ b/drivers/ptp/ptp_sysfs.c
51703@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51704 goto no_pin_attr;
51705
51706 for (i = 0; i < n_pins; i++) {
51707- struct device_attribute *da = &ptp->pin_dev_attr[i];
51708+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51709 sysfs_attr_init(&da->attr);
51710 da->attr.name = info->pin_config[i].name;
51711 da->attr.mode = 0644;
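
ptp builds its pin attributes at probe time, so they cannot live in read-only data; the device_attribute_no_const alias opts those instances out of the constify plugin's implicit const. A small standalone contrast between a statically known attribute and ones assembled at run time (attribute layout simplified, names invented):

#include <stdio.h>

struct attr { const char *name; unsigned mode; };

/* Statically known attribute: fine to place in read-only data. */
static const struct attr fixed = { .name = "energy_uj", .mode = 0444 };

int main(void)
{
    /* Attributes built at probe time, like the pin attributes above,
     * must stay writable -- the role the _no_const typedef plays. */
    struct attr dynamic[2];
    for (int i = 0; i < 2; i++) {
        dynamic[i].name = i ? "pin1" : "pin0";
        dynamic[i].mode = 0644;
    }
    printf("%s %o / %s %o\n", fixed.name, fixed.mode,
           dynamic[0].name, dynamic[0].mode);
    return 0;
}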
51712diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51713index 4c1f999..11078c9 100644
51714--- a/drivers/regulator/core.c
51715+++ b/drivers/regulator/core.c
51716@@ -3391,7 +3391,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51717 {
51718 const struct regulation_constraints *constraints = NULL;
51719 const struct regulator_init_data *init_data;
51720- static atomic_t regulator_no = ATOMIC_INIT(0);
51721+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
51722 struct regulator_dev *rdev;
51723 struct device *dev;
51724 int ret, i;
51725@@ -3461,7 +3461,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51726 rdev->dev.of_node = of_node_get(config->of_node);
51727 rdev->dev.parent = dev;
51728 dev_set_name(&rdev->dev, "regulator.%d",
51729- atomic_inc_return(&regulator_no) - 1);
51730+ atomic_inc_return_unchecked(&regulator_no) - 1);
51731 ret = device_register(&rdev->dev);
51732 if (ret != 0) {
51733 put_device(&rdev->dev);
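
regulator_no is only a name counter, so its wraparound is harmless; the patch therefore moves it to atomic_unchecked_t, the variant that PAX_REFCOUNT's overflow trap ignores. A simplified single-threaded model of the two flavors (assumption: the real operations are arch-specific and genuinely atomic; the assert merely stands in for the hardware trap, and the increments here are not actually atomic):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static int atomic_inc_return(atomic_t *v)
{
    assert(v->counter != INT_MAX);   /* stand-in for the overflow trap */
    return ++v->counter;
}

static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
    return ++v->counter;             /* may wrap; deliberately unchecked */
}

int main(void)
{
    atomic_t refs = { 0 };                       /* protected refcount */
    static atomic_unchecked_t regulator_no;      /* mirrors the hunk above */

    printf("refcount=%d regulator.%d\n",
           atomic_inc_return(&refs),
           atomic_inc_return_unchecked(&regulator_no) - 1);
    return 0;
}

The same conversion is applied below to other benign ID and statistics counters (fcoe_sysfs, hosts.c, libfc, lpfc, pmcraid, the scsi transports, sd.c, timed_output).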
51734diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51735index 2fc4111..6aa88ca 100644
51736--- a/drivers/regulator/max8660.c
51737+++ b/drivers/regulator/max8660.c
51738@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
51739 max8660->shadow_regs[MAX8660_OVER1] = 5;
51740 } else {
51741 /* Otherwise devices can be toggled via software */
51742- max8660_dcdc_ops.enable = max8660_dcdc_enable;
51743- max8660_dcdc_ops.disable = max8660_dcdc_disable;
51744+ pax_open_kernel();
51745+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51746+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51747+ pax_close_kernel();
51748 }
51749
51750 /*
51751diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51752index dbedf17..18ff6b7 100644
51753--- a/drivers/regulator/max8973-regulator.c
51754+++ b/drivers/regulator/max8973-regulator.c
51755@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51756 if (!pdata || !pdata->enable_ext_control) {
51757 max->desc.enable_reg = MAX8973_VOUT;
51758 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51759- max->ops.enable = regulator_enable_regmap;
51760- max->ops.disable = regulator_disable_regmap;
51761- max->ops.is_enabled = regulator_is_enabled_regmap;
51762+ pax_open_kernel();
51763+ *(void **)&max->ops.enable = regulator_enable_regmap;
51764+ *(void **)&max->ops.disable = regulator_disable_regmap;
51765+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51766+ pax_close_kernel();
51767 }
51768
51769 if (pdata) {
51770diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51771index f374fa5..26f0683 100644
51772--- a/drivers/regulator/mc13892-regulator.c
51773+++ b/drivers/regulator/mc13892-regulator.c
51774@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51775 }
51776 mc13xxx_unlock(mc13892);
51777
51778- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
51779+ pax_open_kernel();
51780+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
51781 = mc13892_vcam_set_mode;
51782- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
51783+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
51784 = mc13892_vcam_get_mode;
51785+ pax_close_kernel();
51786
51787 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
51788 ARRAY_SIZE(mc13892_regulators));
51789diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51790index b0e4a3e..e5dc11e 100644
51791--- a/drivers/rtc/rtc-cmos.c
51792+++ b/drivers/rtc/rtc-cmos.c
51793@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51794 hpet_rtc_timer_init();
51795
51796 /* export at least the first block of NVRAM */
51797- nvram.size = address_space - NVRAM_OFFSET;
51798+ pax_open_kernel();
51799+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51800+ pax_close_kernel();
51801 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51802 if (retval < 0) {
51803 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51804diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51805index d049393..bb20be0 100644
51806--- a/drivers/rtc/rtc-dev.c
51807+++ b/drivers/rtc/rtc-dev.c
51808@@ -16,6 +16,7 @@
51809 #include <linux/module.h>
51810 #include <linux/rtc.h>
51811 #include <linux/sched.h>
51812+#include <linux/grsecurity.h>
51813 #include "rtc-core.h"
51814
51815 static dev_t rtc_devt;
51816@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51817 if (copy_from_user(&tm, uarg, sizeof(tm)))
51818 return -EFAULT;
51819
51820+ gr_log_timechange();
51821+
51822 return rtc_set_time(rtc, &tm);
51823
51824 case RTC_PIE_ON:
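
The rtc-dev hunk inserts an audit hook before RTC_SET_TIME so that grsecurity can log who changes the clock. The real gr_log_timechange() is implemented elsewhere in this patch; a hypothetical stand-in just to make the call site concrete:

#include <stdio.h>

/* Hypothetical stand-in: in-kernel this writes an audit record
 * identifying the task that is about to set the clock. */
static void gr_log_timechange(void)
{
    fprintf(stderr, "grsec: time change requested\n");
}

int main(void)
{
    gr_log_timechange();   /* called just before the clock would be set */
    return 0;
}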
51825diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51826index f03d5ba..8325bf6 100644
51827--- a/drivers/rtc/rtc-ds1307.c
51828+++ b/drivers/rtc/rtc-ds1307.c
51829@@ -107,7 +107,7 @@ struct ds1307 {
51830 u8 offset; /* register's offset */
51831 u8 regs[11];
51832 u16 nvram_offset;
51833- struct bin_attribute *nvram;
51834+ bin_attribute_no_const *nvram;
51835 enum ds_type type;
51836 unsigned long flags;
51837 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51838diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51839index 11880c1..b823aa4 100644
51840--- a/drivers/rtc/rtc-m48t59.c
51841+++ b/drivers/rtc/rtc-m48t59.c
51842@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
51843 if (IS_ERR(m48t59->rtc))
51844 return PTR_ERR(m48t59->rtc);
51845
51846- m48t59_nvram_attr.size = pdata->offset;
51847+ pax_open_kernel();
51848+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
51849+ pax_close_kernel();
51850
51851 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
51852 if (ret)
51853diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
51854index e693af6..2e525b6 100644
51855--- a/drivers/scsi/bfa/bfa_fcpim.h
51856+++ b/drivers/scsi/bfa/bfa_fcpim.h
51857@@ -36,7 +36,7 @@ struct bfa_iotag_s {
51858
51859 struct bfa_itn_s {
51860 bfa_isr_func_t isr;
51861-};
51862+} __no_const;
51863
51864 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
51865 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
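
bfa_itn_s consists solely of a function pointer that bfa_itn_create() assigns at run time, so the constify plugin must not force it read-only; __no_const marks exactly that exemption. A sketch of how the annotation degrades when the plugin is absent (assumption: this mirrors the usual compiler-header pattern, not the literal header text):

#include <stdio.h>

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))   /* plugin-provided attribute */
#else
#define __no_const                              /* compiles away otherwise */
#endif

struct itn_demo {
    void (*isr)(int vec);
} __no_const;            /* must stay writable: isr is bound at attach time */

static void demo_isr(int vec) { printf("isr %d\n", vec); }

int main(void)
{
    struct itn_demo itn;
    itn.isr = demo_isr;  /* the run-time assignment that forbids const */
    itn.isr(3);
    return 0;
}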
51866diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
51867index a3ab5cc..8143622 100644
51868--- a/drivers/scsi/bfa/bfa_fcs.c
51869+++ b/drivers/scsi/bfa/bfa_fcs.c
51870@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
51871 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
51872
51873 static struct bfa_fcs_mod_s fcs_modules[] = {
51874- { bfa_fcs_port_attach, NULL, NULL },
51875- { bfa_fcs_uf_attach, NULL, NULL },
51876- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
51877- bfa_fcs_fabric_modexit },
51878+ {
51879+ .attach = bfa_fcs_port_attach,
51880+ .modinit = NULL,
51881+ .modexit = NULL
51882+ },
51883+ {
51884+ .attach = bfa_fcs_uf_attach,
51885+ .modinit = NULL,
51886+ .modexit = NULL
51887+ },
51888+ {
51889+ .attach = bfa_fcs_fabric_attach,
51890+ .modinit = bfa_fcs_fabric_modinit,
51891+ .modexit = bfa_fcs_fabric_modexit
51892+ },
51893 };
51894
51895 /*
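
Throughout the patch, positional initializers for ops tables are rewritten as designated initializers, as in fcs_modules above; besides making the NULL slots explicit, this lets such tables become const data and keeps them correct if fields are ever reordered. A runnable miniature with invented names:

#include <stdio.h>

struct mod_ops {
    void (*attach)(void);
    void (*modinit)(void);
    void (*modexit)(void);
};

static void port_attach(void) { puts("port attach"); }

static const struct mod_ops fcs_demo[] = {
    {
        .attach  = port_attach,
        .modinit = NULL,        /* explicit, unlike the positional form */
        .modexit = NULL,
    },
};

int main(void)
{
    fcs_demo[0].attach();
    if (!fcs_demo[0].modinit)
        puts("no modinit, skipped");
    return 0;
}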
51896diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
51897index ff75ef8..2dfe00a 100644
51898--- a/drivers/scsi/bfa/bfa_fcs_lport.c
51899+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
51900@@ -89,15 +89,26 @@ static struct {
51901 void (*offline) (struct bfa_fcs_lport_s *port);
51902 } __port_action[] = {
51903 {
51904- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
51905- bfa_fcs_lport_unknown_offline}, {
51906- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
51907- bfa_fcs_lport_fab_offline}, {
51908- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
51909- bfa_fcs_lport_n2n_offline}, {
51910- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
51911- bfa_fcs_lport_loop_offline},
51912- };
51913+ .init = bfa_fcs_lport_unknown_init,
51914+ .online = bfa_fcs_lport_unknown_online,
51915+ .offline = bfa_fcs_lport_unknown_offline
51916+ },
51917+ {
51918+ .init = bfa_fcs_lport_fab_init,
51919+ .online = bfa_fcs_lport_fab_online,
51920+ .offline = bfa_fcs_lport_fab_offline
51921+ },
51922+ {
51923+ .init = bfa_fcs_lport_n2n_init,
51924+ .online = bfa_fcs_lport_n2n_online,
51925+ .offline = bfa_fcs_lport_n2n_offline
51926+ },
51927+ {
51928+ .init = bfa_fcs_lport_loop_init,
51929+ .online = bfa_fcs_lport_loop_online,
51930+ .offline = bfa_fcs_lport_loop_offline
51931+ },
51932+};
51933
51934 /*
51935 * fcs_port_sm FCS logical port state machine
51936diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
51937index 2e28392..9d865b6 100644
51938--- a/drivers/scsi/bfa/bfa_ioc.h
51939+++ b/drivers/scsi/bfa/bfa_ioc.h
51940@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
51941 bfa_ioc_disable_cbfn_t disable_cbfn;
51942 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
51943 bfa_ioc_reset_cbfn_t reset_cbfn;
51944-};
51945+} __no_const;
51946
51947 /*
51948 * IOC event notification mechanism.
51949@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
51950 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
51951 enum bfi_ioc_state fwstate);
51952 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
51953-};
51954+} __no_const;
51955
51956 /*
51957 * Queue element to wait for room in request queue. FIFO order is
51958diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
51959index a14c784..6de6790 100644
51960--- a/drivers/scsi/bfa/bfa_modules.h
51961+++ b/drivers/scsi/bfa/bfa_modules.h
51962@@ -78,12 +78,12 @@ enum {
51963 \
51964 extern struct bfa_module_s hal_mod_ ## __mod; \
51965 struct bfa_module_s hal_mod_ ## __mod = { \
51966- bfa_ ## __mod ## _meminfo, \
51967- bfa_ ## __mod ## _attach, \
51968- bfa_ ## __mod ## _detach, \
51969- bfa_ ## __mod ## _start, \
51970- bfa_ ## __mod ## _stop, \
51971- bfa_ ## __mod ## _iocdisable, \
51972+ .meminfo = bfa_ ## __mod ## _meminfo, \
51973+ .attach = bfa_ ## __mod ## _attach, \
51974+ .detach = bfa_ ## __mod ## _detach, \
51975+ .start = bfa_ ## __mod ## _start, \
51976+ .stop = bfa_ ## __mod ## _stop, \
51977+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51978 }
51979
51980 #define BFA_CACHELINE_SZ (256)
51981diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51982index 045c4e1..13de803 100644
51983--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51984+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51985@@ -33,8 +33,8 @@
51986 */
51987 #include "libfcoe.h"
51988
51989-static atomic_t ctlr_num;
51990-static atomic_t fcf_num;
51991+static atomic_unchecked_t ctlr_num;
51992+static atomic_unchecked_t fcf_num;
51993
51994 /*
51995 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51996@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51997 if (!ctlr)
51998 goto out;
51999
52000- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
52001+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
52002 ctlr->f = f;
52003 ctlr->mode = FIP_CONN_TYPE_FABRIC;
52004 INIT_LIST_HEAD(&ctlr->fcfs);
52005@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
52006 fcf->dev.parent = &ctlr->dev;
52007 fcf->dev.bus = &fcoe_bus_type;
52008 fcf->dev.type = &fcoe_fcf_device_type;
52009- fcf->id = atomic_inc_return(&fcf_num) - 1;
52010+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
52011 fcf->state = FCOE_FCF_STATE_UNKNOWN;
52012
52013 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
52014@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
52015 {
52016 int error;
52017
52018- atomic_set(&ctlr_num, 0);
52019- atomic_set(&fcf_num, 0);
52020+ atomic_set_unchecked(&ctlr_num, 0);
52021+ atomic_set_unchecked(&fcf_num, 0);
52022
52023 error = bus_register(&fcoe_bus_type);
52024 if (error)
52025diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52026index 3cbb57a..95e47a3 100644
52027--- a/drivers/scsi/hosts.c
52028+++ b/drivers/scsi/hosts.c
52029@@ -42,7 +42,7 @@
52030 #include "scsi_logging.h"
52031
52032
52033-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52034+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52035
52036
52037 static void scsi_host_cls_release(struct device *dev)
52038@@ -369,7 +369,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52039 * subtract one because we increment first then return, but we need to
52040 * know what the next host number was before increment
52041 */
52042- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52043+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52044 shost->dma_channel = 0xff;
52045
52046 /* These three are default values which can be overridden */
52047diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52048index 31184b3..cc44bbf 100644
52049--- a/drivers/scsi/hpsa.c
52050+++ b/drivers/scsi/hpsa.c
52051@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52052 unsigned long flags;
52053
52054 if (h->transMethod & CFGTBL_Trans_io_accel1)
52055- return h->access.command_completed(h, q);
52056+ return h->access->command_completed(h, q);
52057
52058 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52059- return h->access.command_completed(h, q);
52060+ return h->access->command_completed(h, q);
52061
52062 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52063 a = rq->head[rq->current_entry];
52064@@ -5455,7 +5455,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52065 while (!list_empty(&h->reqQ)) {
52066 c = list_entry(h->reqQ.next, struct CommandList, list);
52067 /* can't do anything if fifo is full */
52068- if ((h->access.fifo_full(h))) {
52069+ if ((h->access->fifo_full(h))) {
52070 h->fifo_recently_full = 1;
52071 dev_warn(&h->pdev->dev, "fifo full\n");
52072 break;
52073@@ -5477,7 +5477,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52074
52075 /* Tell the controller execute command */
52076 spin_unlock_irqrestore(&h->lock, *flags);
52077- h->access.submit_command(h, c);
52078+ h->access->submit_command(h, c);
52079 spin_lock_irqsave(&h->lock, *flags);
52080 }
52081 }
52082@@ -5493,17 +5493,17 @@ static void lock_and_start_io(struct ctlr_info *h)
52083
52084 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52085 {
52086- return h->access.command_completed(h, q);
52087+ return h->access->command_completed(h, q);
52088 }
52089
52090 static inline bool interrupt_pending(struct ctlr_info *h)
52091 {
52092- return h->access.intr_pending(h);
52093+ return h->access->intr_pending(h);
52094 }
52095
52096 static inline long interrupt_not_for_us(struct ctlr_info *h)
52097 {
52098- return (h->access.intr_pending(h) == 0) ||
52099+ return (h->access->intr_pending(h) == 0) ||
52100 (h->interrupts_enabled == 0);
52101 }
52102
52103@@ -6459,7 +6459,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52104 if (prod_index < 0)
52105 return -ENODEV;
52106 h->product_name = products[prod_index].product_name;
52107- h->access = *(products[prod_index].access);
52108+ h->access = products[prod_index].access;
52109
52110 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52111 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52112@@ -6781,7 +6781,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52113 unsigned long flags;
52114 u32 lockup_detected;
52115
52116- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52117+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52118 spin_lock_irqsave(&h->lock, flags);
52119 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52120 if (!lockup_detected) {
52121@@ -7022,7 +7022,7 @@ reinit_after_soft_reset:
52122 }
52123
52124 /* make sure the board interrupts are off */
52125- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52126+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52127
52128 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52129 goto clean2;
52130@@ -7057,7 +7057,7 @@ reinit_after_soft_reset:
52131 * fake ones to scoop up any residual completions.
52132 */
52133 spin_lock_irqsave(&h->lock, flags);
52134- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52135+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52136 spin_unlock_irqrestore(&h->lock, flags);
52137 free_irqs(h);
52138 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
52139@@ -7076,9 +7076,9 @@ reinit_after_soft_reset:
52140 dev_info(&h->pdev->dev, "Board READY.\n");
52141 dev_info(&h->pdev->dev,
52142 "Waiting for stale completions to drain.\n");
52143- h->access.set_intr_mask(h, HPSA_INTR_ON);
52144+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52145 msleep(10000);
52146- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52147+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52148
52149 rc = controller_reset_failed(h->cfgtable);
52150 if (rc)
52151@@ -7104,7 +7104,7 @@ reinit_after_soft_reset:
52152 h->drv_req_rescan = 0;
52153
52154 /* Turn the interrupts on so we can service requests */
52155- h->access.set_intr_mask(h, HPSA_INTR_ON);
52156+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52157
52158 hpsa_hba_inquiry(h);
52159 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52160@@ -7169,7 +7169,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52161 * To write all data in the battery backed cache to disks
52162 */
52163 hpsa_flush_cache(h);
52164- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52165+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52166 hpsa_free_irqs_and_disable_msix(h);
52167 }
52168
52169@@ -7287,7 +7287,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52170 CFGTBL_Trans_enable_directed_msix |
52171 (trans_support & (CFGTBL_Trans_io_accel1 |
52172 CFGTBL_Trans_io_accel2));
52173- struct access_method access = SA5_performant_access;
52174+ struct access_method *access = &SA5_performant_access;
52175
52176 /* This is a bit complicated. There are 8 registers on
52177 * the controller which we write to to tell it 8 different
52178@@ -7329,7 +7329,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52179 * perform the superfluous readl() after each command submission.
52180 */
52181 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52182- access = SA5_performant_access_no_read;
52183+ access = &SA5_performant_access_no_read;
52184
52185 /* Controller spec: zero out this buffer. */
52186 for (i = 0; i < h->nreply_queues; i++)
52187@@ -7359,12 +7359,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52188 * enable outbound interrupt coalescing in accelerator mode;
52189 */
52190 if (trans_support & CFGTBL_Trans_io_accel1) {
52191- access = SA5_ioaccel_mode1_access;
52192+ access = &SA5_ioaccel_mode1_access;
52193 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52194 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52195 } else {
52196 if (trans_support & CFGTBL_Trans_io_accel2) {
52197- access = SA5_ioaccel_mode2_access;
52198+ access = &SA5_ioaccel_mode2_access;
52199 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52200 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52201 }
52202diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52203index 24472ce..8782caf 100644
52204--- a/drivers/scsi/hpsa.h
52205+++ b/drivers/scsi/hpsa.h
52206@@ -127,7 +127,7 @@ struct ctlr_info {
52207 unsigned int msix_vector;
52208 unsigned int msi_vector;
52209 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52210- struct access_method access;
52211+ struct access_method *access;
52212 char hba_mode_enabled;
52213
52214 /* queue and queue Info */
52215@@ -536,43 +536,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52216 }
52217
52218 static struct access_method SA5_access = {
52219- SA5_submit_command,
52220- SA5_intr_mask,
52221- SA5_fifo_full,
52222- SA5_intr_pending,
52223- SA5_completed,
52224+ .submit_command = SA5_submit_command,
52225+ .set_intr_mask = SA5_intr_mask,
52226+ .fifo_full = SA5_fifo_full,
52227+ .intr_pending = SA5_intr_pending,
52228+ .command_completed = SA5_completed,
52229 };
52230
52231 static struct access_method SA5_ioaccel_mode1_access = {
52232- SA5_submit_command,
52233- SA5_performant_intr_mask,
52234- SA5_fifo_full,
52235- SA5_ioaccel_mode1_intr_pending,
52236- SA5_ioaccel_mode1_completed,
52237+ .submit_command = SA5_submit_command,
52238+ .set_intr_mask = SA5_performant_intr_mask,
52239+ .fifo_full = SA5_fifo_full,
52240+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52241+ .command_completed = SA5_ioaccel_mode1_completed,
52242 };
52243
52244 static struct access_method SA5_ioaccel_mode2_access = {
52245- SA5_submit_command_ioaccel2,
52246- SA5_performant_intr_mask,
52247- SA5_fifo_full,
52248- SA5_performant_intr_pending,
52249- SA5_performant_completed,
52250+ .submit_command = SA5_submit_command_ioaccel2,
52251+ .set_intr_mask = SA5_performant_intr_mask,
52252+ .fifo_full = SA5_fifo_full,
52253+ .intr_pending = SA5_performant_intr_pending,
52254+ .command_completed = SA5_performant_completed,
52255 };
52256
52257 static struct access_method SA5_performant_access = {
52258- SA5_submit_command,
52259- SA5_performant_intr_mask,
52260- SA5_fifo_full,
52261- SA5_performant_intr_pending,
52262- SA5_performant_completed,
52263+ .submit_command = SA5_submit_command,
52264+ .set_intr_mask = SA5_performant_intr_mask,
52265+ .fifo_full = SA5_fifo_full,
52266+ .intr_pending = SA5_performant_intr_pending,
52267+ .command_completed = SA5_performant_completed,
52268 };
52269
52270 static struct access_method SA5_performant_access_no_read = {
52271- SA5_submit_command_no_read,
52272- SA5_performant_intr_mask,
52273- SA5_fifo_full,
52274- SA5_performant_intr_pending,
52275- SA5_performant_completed,
52276+ .submit_command = SA5_submit_command_no_read,
52277+ .set_intr_mask = SA5_performant_intr_mask,
52278+ .fifo_full = SA5_fifo_full,
52279+ .intr_pending = SA5_performant_intr_pending,
52280+ .command_completed = SA5_performant_completed,
52281 };
52282
52283 struct board_type {
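
The hpsa change replaces an embedded struct access_method (a writable per-controller copy of a function-pointer table) with a pointer to one of the shared static tables above, turning every h->access.fn() call into h->access->fn(). A minimal model of that pointer-not-copy design (identifiers invented):

#include <stdio.h>

struct access_method { void (*submit)(const char *what); };

static void perf_submit(const char *what) { printf("submit %s\n", what); }

/* One shared table; as a pointer target it can live in read-only data. */
static const struct access_method SA5_demo = {
    .submit = perf_submit,
};

struct ctlr {
    const struct access_method *access;  /* was: struct access_method access */
};

int main(void)
{
    struct ctlr h = { .access = &SA5_demo };
    h.access->submit("cmd");             /* was: h.access.submit(...) */
    return 0;
}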
52284diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52285index 1b3a094..068e683 100644
52286--- a/drivers/scsi/libfc/fc_exch.c
52287+++ b/drivers/scsi/libfc/fc_exch.c
52288@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52289 u16 pool_max_index;
52290
52291 struct {
52292- atomic_t no_free_exch;
52293- atomic_t no_free_exch_xid;
52294- atomic_t xid_not_found;
52295- atomic_t xid_busy;
52296- atomic_t seq_not_found;
52297- atomic_t non_bls_resp;
52298+ atomic_unchecked_t no_free_exch;
52299+ atomic_unchecked_t no_free_exch_xid;
52300+ atomic_unchecked_t xid_not_found;
52301+ atomic_unchecked_t xid_busy;
52302+ atomic_unchecked_t seq_not_found;
52303+ atomic_unchecked_t non_bls_resp;
52304 } stats;
52305 };
52306
52307@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52308 /* allocate memory for exchange */
52309 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52310 if (!ep) {
52311- atomic_inc(&mp->stats.no_free_exch);
52312+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52313 goto out;
52314 }
52315 memset(ep, 0, sizeof(*ep));
52316@@ -874,7 +874,7 @@ out:
52317 return ep;
52318 err:
52319 spin_unlock_bh(&pool->lock);
52320- atomic_inc(&mp->stats.no_free_exch_xid);
52321+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52322 mempool_free(ep, mp->ep_pool);
52323 return NULL;
52324 }
52325@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52326 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52327 ep = fc_exch_find(mp, xid);
52328 if (!ep) {
52329- atomic_inc(&mp->stats.xid_not_found);
52330+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52331 reject = FC_RJT_OX_ID;
52332 goto out;
52333 }
52334@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52335 ep = fc_exch_find(mp, xid);
52336 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52337 if (ep) {
52338- atomic_inc(&mp->stats.xid_busy);
52339+ atomic_inc_unchecked(&mp->stats.xid_busy);
52340 reject = FC_RJT_RX_ID;
52341 goto rel;
52342 }
52343@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52344 }
52345 xid = ep->xid; /* get our XID */
52346 } else if (!ep) {
52347- atomic_inc(&mp->stats.xid_not_found);
52348+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52349 reject = FC_RJT_RX_ID; /* XID not found */
52350 goto out;
52351 }
52352@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52353 } else {
52354 sp = &ep->seq;
52355 if (sp->id != fh->fh_seq_id) {
52356- atomic_inc(&mp->stats.seq_not_found);
52357+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52358 if (f_ctl & FC_FC_END_SEQ) {
52359 /*
52360 * Update sequence_id based on incoming last
52361@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52362
52363 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52364 if (!ep) {
52365- atomic_inc(&mp->stats.xid_not_found);
52366+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52367 goto out;
52368 }
52369 if (ep->esb_stat & ESB_ST_COMPLETE) {
52370- atomic_inc(&mp->stats.xid_not_found);
52371+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52372 goto rel;
52373 }
52374 if (ep->rxid == FC_XID_UNKNOWN)
52375 ep->rxid = ntohs(fh->fh_rx_id);
52376 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52377- atomic_inc(&mp->stats.xid_not_found);
52378+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52379 goto rel;
52380 }
52381 if (ep->did != ntoh24(fh->fh_s_id) &&
52382 ep->did != FC_FID_FLOGI) {
52383- atomic_inc(&mp->stats.xid_not_found);
52384+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52385 goto rel;
52386 }
52387 sof = fr_sof(fp);
52388@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52389 sp->ssb_stat |= SSB_ST_RESP;
52390 sp->id = fh->fh_seq_id;
52391 } else if (sp->id != fh->fh_seq_id) {
52392- atomic_inc(&mp->stats.seq_not_found);
52393+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52394 goto rel;
52395 }
52396
52397@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52398 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52399
52400 if (!sp)
52401- atomic_inc(&mp->stats.xid_not_found);
52402+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52403 else
52404- atomic_inc(&mp->stats.non_bls_resp);
52405+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52406
52407 fc_frame_free(fp);
52408 }
52409@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52410
52411 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52412 mp = ema->mp;
52413- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52414+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52415 st->fc_no_free_exch_xid +=
52416- atomic_read(&mp->stats.no_free_exch_xid);
52417- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52418- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52419- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52420- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52421+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52422+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52423+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52424+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52425+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52426 }
52427 }
52428 EXPORT_SYMBOL(fc_exch_update_stats);
52429diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52430index 766098a..1c6c971 100644
52431--- a/drivers/scsi/libsas/sas_ata.c
52432+++ b/drivers/scsi/libsas/sas_ata.c
52433@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
52434 .postreset = ata_std_postreset,
52435 .error_handler = ata_std_error_handler,
52436 .post_internal_cmd = sas_ata_post_internal,
52437- .qc_defer = ata_std_qc_defer,
52438+ .qc_defer = ata_std_qc_defer,
52439 .qc_prep = ata_noop_qc_prep,
52440 .qc_issue = sas_ata_qc_issue,
52441 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52442diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52443index 434e903..5a4a79b 100644
52444--- a/drivers/scsi/lpfc/lpfc.h
52445+++ b/drivers/scsi/lpfc/lpfc.h
52446@@ -430,7 +430,7 @@ struct lpfc_vport {
52447 struct dentry *debug_nodelist;
52448 struct dentry *vport_debugfs_root;
52449 struct lpfc_debugfs_trc *disc_trc;
52450- atomic_t disc_trc_cnt;
52451+ atomic_unchecked_t disc_trc_cnt;
52452 #endif
52453 uint8_t stat_data_enabled;
52454 uint8_t stat_data_blocked;
52455@@ -880,8 +880,8 @@ struct lpfc_hba {
52456 struct timer_list fabric_block_timer;
52457 unsigned long bit_flags;
52458 #define FABRIC_COMANDS_BLOCKED 0
52459- atomic_t num_rsrc_err;
52460- atomic_t num_cmd_success;
52461+ atomic_unchecked_t num_rsrc_err;
52462+ atomic_unchecked_t num_cmd_success;
52463 unsigned long last_rsrc_error_time;
52464 unsigned long last_ramp_down_time;
52465 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52466@@ -916,7 +916,7 @@ struct lpfc_hba {
52467
52468 struct dentry *debug_slow_ring_trc;
52469 struct lpfc_debugfs_trc *slow_ring_trc;
52470- atomic_t slow_ring_trc_cnt;
52471+ atomic_unchecked_t slow_ring_trc_cnt;
52472 /* iDiag debugfs sub-directory */
52473 struct dentry *idiag_root;
52474 struct dentry *idiag_pci_cfg;
52475diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52476index b0aedce..89c6ca6 100644
52477--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52478+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52479@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52480
52481 #include <linux/debugfs.h>
52482
52483-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52484+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52485 static unsigned long lpfc_debugfs_start_time = 0L;
52486
52487 /* iDiag */
52488@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52489 lpfc_debugfs_enable = 0;
52490
52491 len = 0;
52492- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52493+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52494 (lpfc_debugfs_max_disc_trc - 1);
52495 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52496 dtp = vport->disc_trc + i;
52497@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52498 lpfc_debugfs_enable = 0;
52499
52500 len = 0;
52501- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52502+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52503 (lpfc_debugfs_max_slow_ring_trc - 1);
52504 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52505 dtp = phba->slow_ring_trc + i;
52506@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52507 !vport || !vport->disc_trc)
52508 return;
52509
52510- index = atomic_inc_return(&vport->disc_trc_cnt) &
52511+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52512 (lpfc_debugfs_max_disc_trc - 1);
52513 dtp = vport->disc_trc + index;
52514 dtp->fmt = fmt;
52515 dtp->data1 = data1;
52516 dtp->data2 = data2;
52517 dtp->data3 = data3;
52518- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52519+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52520 dtp->jif = jiffies;
52521 #endif
52522 return;
52523@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52524 !phba || !phba->slow_ring_trc)
52525 return;
52526
52527- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52528+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52529 (lpfc_debugfs_max_slow_ring_trc - 1);
52530 dtp = phba->slow_ring_trc + index;
52531 dtp->fmt = fmt;
52532 dtp->data1 = data1;
52533 dtp->data2 = data2;
52534 dtp->data3 = data3;
52535- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52536+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52537 dtp->jif = jiffies;
52538 #endif
52539 return;
52540@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52541 "slow_ring buffer\n");
52542 goto debug_failed;
52543 }
52544- atomic_set(&phba->slow_ring_trc_cnt, 0);
52545+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52546 memset(phba->slow_ring_trc, 0,
52547 (sizeof(struct lpfc_debugfs_trc) *
52548 lpfc_debugfs_max_slow_ring_trc));
52549@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52550 "buffer\n");
52551 goto debug_failed;
52552 }
52553- atomic_set(&vport->disc_trc_cnt, 0);
52554+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52555
52556 snprintf(name, sizeof(name), "discovery_trace");
52557 vport->debug_disc_trc =
52558diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52559index 06f9a5b..82812092 100644
52560--- a/drivers/scsi/lpfc/lpfc_init.c
52561+++ b/drivers/scsi/lpfc/lpfc_init.c
52562@@ -11296,8 +11296,10 @@ lpfc_init(void)
52563 "misc_register returned with status %d", error);
52564
52565 if (lpfc_enable_npiv) {
52566- lpfc_transport_functions.vport_create = lpfc_vport_create;
52567- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52568+ pax_open_kernel();
52569+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52570+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52571+ pax_close_kernel();
52572 }
52573 lpfc_transport_template =
52574 fc_attach_transport(&lpfc_transport_functions);
52575diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52576index 2df11da..e660a2c 100644
52577--- a/drivers/scsi/lpfc/lpfc_scsi.c
52578+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52579@@ -382,7 +382,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52580 uint32_t evt_posted;
52581
52582 spin_lock_irqsave(&phba->hbalock, flags);
52583- atomic_inc(&phba->num_rsrc_err);
52584+ atomic_inc_unchecked(&phba->num_rsrc_err);
52585 phba->last_rsrc_error_time = jiffies;
52586
52587 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
52588@@ -423,8 +423,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52589 unsigned long num_rsrc_err, num_cmd_success;
52590 int i;
52591
52592- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52593- num_cmd_success = atomic_read(&phba->num_cmd_success);
52594+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52595+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52596
52597 /*
52598 * The error and success command counters are global per
52599@@ -452,8 +452,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52600 }
52601 }
52602 lpfc_destroy_vport_work_array(phba, vports);
52603- atomic_set(&phba->num_rsrc_err, 0);
52604- atomic_set(&phba->num_cmd_success, 0);
52605+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52606+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52607 }
52608
52609 /**
52610diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52611index 5055f92..376cd98 100644
52612--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52613+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52614@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
52615 {
52616 struct scsi_device *sdev = to_scsi_device(dev);
52617 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52618- static struct _raid_device *raid_device;
52619+ struct _raid_device *raid_device;
52620 unsigned long flags;
52621 Mpi2RaidVolPage0_t vol_pg0;
52622 Mpi2ConfigReply_t mpi_reply;
52623@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
52624 {
52625 struct scsi_device *sdev = to_scsi_device(dev);
52626 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52627- static struct _raid_device *raid_device;
52628+ struct _raid_device *raid_device;
52629 unsigned long flags;
52630 Mpi2RaidVolPage0_t vol_pg0;
52631 Mpi2ConfigReply_t mpi_reply;
52632@@ -6631,7 +6631,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52633 struct fw_event_work *fw_event)
52634 {
52635 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
52636- static struct _raid_device *raid_device;
52637+ struct _raid_device *raid_device;
52638 unsigned long flags;
52639 u16 handle;
52640
52641@@ -7102,7 +7102,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52642 u64 sas_address;
52643 struct _sas_device *sas_device;
52644 struct _sas_node *expander_device;
52645- static struct _raid_device *raid_device;
52646+ struct _raid_device *raid_device;
52647 u8 retry_count;
52648 unsigned long flags;
52649
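
The mpt2sas hunks drop a stray `static` from local raid_device pointers: a static local is one slot shared by every invocation, so concurrent sysfs reads could overwrite each other's lookup. The hazard in miniature (serial calls here; in the driver the callers are concurrent):

#include <stdio.h>

static int *stash(int val)
{
    static int slot;      /* one shared slot for every caller */
    slot = val;
    return &slot;
}

int main(void)
{
    int *first  = stash(1);
    int *second = stash(2);
    /* Both pointers alias the same static slot, so the first caller's
     * value is gone -- the in-kernel analogue is two sysfs reads racing
     * on the shared raid_device pointer. */
    printf("%d %d\n", *first, *second);   /* prints "2 2" */
    return 0;
}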
52650diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52651index be8ce54..94ed33a 100644
52652--- a/drivers/scsi/pmcraid.c
52653+++ b/drivers/scsi/pmcraid.c
52654@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52655 res->scsi_dev = scsi_dev;
52656 scsi_dev->hostdata = res;
52657 res->change_detected = 0;
52658- atomic_set(&res->read_failures, 0);
52659- atomic_set(&res->write_failures, 0);
52660+ atomic_set_unchecked(&res->read_failures, 0);
52661+ atomic_set_unchecked(&res->write_failures, 0);
52662 rc = 0;
52663 }
52664 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52665@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52666
52667 /* If this was a SCSI read/write command keep count of errors */
52668 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52669- atomic_inc(&res->read_failures);
52670+ atomic_inc_unchecked(&res->read_failures);
52671 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52672- atomic_inc(&res->write_failures);
52673+ atomic_inc_unchecked(&res->write_failures);
52674
52675 if (!RES_IS_GSCSI(res->cfg_entry) &&
52676 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52677@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
52678 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52679 * hrrq_id assigned here in queuecommand
52680 */
52681- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52682+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52683 pinstance->num_hrrq;
52684 cmd->cmd_done = pmcraid_io_done;
52685
52686@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
52687 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52688 * hrrq_id assigned here in queuecommand
52689 */
52690- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52691+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52692 pinstance->num_hrrq;
52693
52694 if (request_size) {
52695@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52696
52697 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52698 /* add resources only after host is added into system */
52699- if (!atomic_read(&pinstance->expose_resources))
52700+ if (!atomic_read_unchecked(&pinstance->expose_resources))
52701 return;
52702
52703 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52704@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52705 init_waitqueue_head(&pinstance->reset_wait_q);
52706
52707 atomic_set(&pinstance->outstanding_cmds, 0);
52708- atomic_set(&pinstance->last_message_id, 0);
52709- atomic_set(&pinstance->expose_resources, 0);
52710+ atomic_set_unchecked(&pinstance->last_message_id, 0);
52711+ atomic_set_unchecked(&pinstance->expose_resources, 0);
52712
52713 INIT_LIST_HEAD(&pinstance->free_res_q);
52714 INIT_LIST_HEAD(&pinstance->used_res_q);
52715@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52716 /* Schedule worker thread to handle CCN and take care of adding and
52717 * removing devices to OS
52718 */
52719- atomic_set(&pinstance->expose_resources, 1);
52720+ atomic_set_unchecked(&pinstance->expose_resources, 1);
52721 schedule_work(&pinstance->worker_q);
52722 return rc;
52723
52724diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52725index e1d150f..6c6df44 100644
52726--- a/drivers/scsi/pmcraid.h
52727+++ b/drivers/scsi/pmcraid.h
52728@@ -748,7 +748,7 @@ struct pmcraid_instance {
52729 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52730
52731 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52732- atomic_t last_message_id;
52733+ atomic_unchecked_t last_message_id;
52734
52735 /* configuration table */
52736 struct pmcraid_config_table *cfg_table;
52737@@ -777,7 +777,7 @@ struct pmcraid_instance {
52738 atomic_t outstanding_cmds;
52739
52740 /* should add/delete resources to mid-layer now ?*/
52741- atomic_t expose_resources;
52742+ atomic_unchecked_t expose_resources;
52743
52744
52745
52746@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52747 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52748 };
52749 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52750- atomic_t read_failures; /* count of failed READ commands */
52751- atomic_t write_failures; /* count of failed WRITE commands */
52752+ atomic_unchecked_t read_failures; /* count of failed READ commands */
52753+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52754
52755 /* To indicate add/delete/modify during CCN */
52756 u8 change_detected;
52757diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52758index 16fe519..3b1ec82 100644
52759--- a/drivers/scsi/qla2xxx/qla_attr.c
52760+++ b/drivers/scsi/qla2xxx/qla_attr.c
52761@@ -2188,7 +2188,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52762 return 0;
52763 }
52764
52765-struct fc_function_template qla2xxx_transport_functions = {
52766+fc_function_template_no_const qla2xxx_transport_functions = {
52767
52768 .show_host_node_name = 1,
52769 .show_host_port_name = 1,
52770@@ -2236,7 +2236,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52771 .bsg_timeout = qla24xx_bsg_timeout,
52772 };
52773
52774-struct fc_function_template qla2xxx_transport_vport_functions = {
52775+fc_function_template_no_const qla2xxx_transport_vport_functions = {
52776
52777 .show_host_node_name = 1,
52778 .show_host_port_name = 1,
52779diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52780index d48dea8..0845f78 100644
52781--- a/drivers/scsi/qla2xxx/qla_gbl.h
52782+++ b/drivers/scsi/qla2xxx/qla_gbl.h
52783@@ -569,8 +569,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
52784 struct device_attribute;
52785 extern struct device_attribute *qla2x00_host_attrs[];
52786 struct fc_function_template;
52787-extern struct fc_function_template qla2xxx_transport_functions;
52788-extern struct fc_function_template qla2xxx_transport_vport_functions;
52789+extern fc_function_template_no_const qla2xxx_transport_functions;
52790+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52791 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52792 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52793 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52794diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52795index d96bfb5..d7afe90 100644
52796--- a/drivers/scsi/qla2xxx/qla_os.c
52797+++ b/drivers/scsi/qla2xxx/qla_os.c
52798@@ -1490,8 +1490,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52799 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52800 /* Ok, a 64bit DMA mask is applicable. */
52801 ha->flags.enable_64bit_addressing = 1;
52802- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52803- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52804+ pax_open_kernel();
52805+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52806+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52807+ pax_close_kernel();
52808 return;
52809 }
52810 }
52811diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52812index 8f6d0fb..1b21097 100644
52813--- a/drivers/scsi/qla4xxx/ql4_def.h
52814+++ b/drivers/scsi/qla4xxx/ql4_def.h
52815@@ -305,7 +305,7 @@ struct ddb_entry {
52816 * (4000 only) */
52817 atomic_t relogin_timer; /* Max Time to wait for
52818 * relogin to complete */
52819- atomic_t relogin_retry_count; /* Num of times relogin has been
52820+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52821 * retried */
52822 uint32_t default_time2wait; /* Default Min time between
52823 * relogins (+aens) */
52824diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52825index 3202063..f9f0ff6 100644
52826--- a/drivers/scsi/qla4xxx/ql4_os.c
52827+++ b/drivers/scsi/qla4xxx/ql4_os.c
52828@@ -4494,12 +4494,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52829 */
52830 if (!iscsi_is_session_online(cls_sess)) {
52831 /* Reset retry relogin timer */
52832- atomic_inc(&ddb_entry->relogin_retry_count);
52833+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52834 DEBUG2(ql4_printk(KERN_INFO, ha,
52835 "%s: index[%d] relogin timed out-retrying"
52836 " relogin (%d), retry (%d)\n", __func__,
52837 ddb_entry->fw_ddb_index,
52838- atomic_read(&ddb_entry->relogin_retry_count),
52839+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52840 ddb_entry->default_time2wait + 4));
52841 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52842 atomic_set(&ddb_entry->retry_relogin_timer,
52843@@ -6607,7 +6607,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52844
52845 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52846 atomic_set(&ddb_entry->relogin_timer, 0);
52847- atomic_set(&ddb_entry->relogin_retry_count, 0);
52848+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52849 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52850 ddb_entry->default_relogin_timeout =
52851 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52852diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
52853index 88d46fe..7351be5 100644
52854--- a/drivers/scsi/scsi.c
52855+++ b/drivers/scsi/scsi.c
52856@@ -640,7 +640,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
52857 struct Scsi_Host *host = cmd->device->host;
52858 int rtn = 0;
52859
52860- atomic_inc(&cmd->device->iorequest_cnt);
52861+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52862
52863 /* check if the device is still usable */
52864 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
52865diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52866index 3f50dfc..86af487 100644
52867--- a/drivers/scsi/scsi_lib.c
52868+++ b/drivers/scsi/scsi_lib.c
52869@@ -1423,7 +1423,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52870 shost = sdev->host;
52871 scsi_init_cmd_errh(cmd);
52872 cmd->result = DID_NO_CONNECT << 16;
52873- atomic_inc(&cmd->device->iorequest_cnt);
52874+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52875
52876 /*
52877 * SCSI request completion path will do scsi_device_unbusy(),
52878@@ -1449,9 +1449,9 @@ static void scsi_softirq_done(struct request *rq)
52879
52880 INIT_LIST_HEAD(&cmd->eh_entry);
52881
52882- atomic_inc(&cmd->device->iodone_cnt);
52883+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
52884 if (cmd->result)
52885- atomic_inc(&cmd->device->ioerr_cnt);
52886+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
52887
52888 disposition = scsi_decide_disposition(cmd);
52889 if (disposition != SUCCESS &&
52890diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
52891index 074e8cc..f612e5c 100644
52892--- a/drivers/scsi/scsi_sysfs.c
52893+++ b/drivers/scsi/scsi_sysfs.c
52894@@ -780,7 +780,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
52895 char *buf) \
52896 { \
52897 struct scsi_device *sdev = to_scsi_device(dev); \
52898- unsigned long long count = atomic_read(&sdev->field); \
52899+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
52900 return snprintf(buf, 20, "0x%llx\n", count); \
52901 } \
52902 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
52903diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
52904index e51add0..1e06a96 100644
52905--- a/drivers/scsi/scsi_tgt_lib.c
52906+++ b/drivers/scsi/scsi_tgt_lib.c
52907@@ -363,7 +363,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
52908 int err;
52909
52910 dprintk("%lx %u\n", uaddr, len);
52911- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
52912+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
52913 if (err) {
52914 /*
52915 * TODO: need to fixup sg_tablesize, max_segment_size,
52916diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
52917index 521f583..6b15966 100644
52918--- a/drivers/scsi/scsi_transport_fc.c
52919+++ b/drivers/scsi/scsi_transport_fc.c
52920@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
52921 * Netlink Infrastructure
52922 */
52923
52924-static atomic_t fc_event_seq;
52925+static atomic_unchecked_t fc_event_seq;
52926
52927 /**
52928 * fc_get_event_number - Obtain the next sequential FC event number
52929@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
52930 u32
52931 fc_get_event_number(void)
52932 {
52933- return atomic_add_return(1, &fc_event_seq);
52934+ return atomic_add_return_unchecked(1, &fc_event_seq);
52935 }
52936 EXPORT_SYMBOL(fc_get_event_number);
52937
52938@@ -655,7 +655,7 @@ static __init int fc_transport_init(void)
52939 {
52940 int error;
52941
52942- atomic_set(&fc_event_seq, 0);
52943+ atomic_set_unchecked(&fc_event_seq, 0);
52944
52945 error = transport_class_register(&fc_host_class);
52946 if (error)
52947@@ -845,7 +845,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
52948 char *cp;
52949
52950 *val = simple_strtoul(buf, &cp, 0);
52951- if ((*cp && (*cp != '\n')) || (*val < 0))
52952+ if (*cp && (*cp != '\n'))
52953 return -EINVAL;
52954 /*
52955 * Check for overflow; dev_loss_tmo is u32
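
The deleted `(*val < 0)` test could never fire: *val is an unsigned long, so the comparison is always false (gcc flags it under -Wtype-limits), and the meaningful range check against the u32 dev_loss_tmo limit follows it. In isolation:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned long val = strtoul("4294967295", NULL, 0);
    if (val < 0)                 /* -Wtype-limits: always false, dead code */
        puts("never printed");
    if (val > 0xffffffffUL)      /* the real overflow check (u32 limit) */
        puts("out of range for dev_loss_tmo");
    else
        printf("ok: %lu\n", val);
    return 0;
}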
52956diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
52957index 0102a2d..cc3f8e9 100644
52958--- a/drivers/scsi/scsi_transport_iscsi.c
52959+++ b/drivers/scsi/scsi_transport_iscsi.c
52960@@ -79,7 +79,7 @@ struct iscsi_internal {
52961 struct transport_container session_cont;
52962 };
52963
52964-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
52965+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
52966 static struct workqueue_struct *iscsi_eh_timer_workq;
52967
52968 static DEFINE_IDA(iscsi_sess_ida);
52969@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
52970 int err;
52971
52972 ihost = shost->shost_data;
52973- session->sid = atomic_add_return(1, &iscsi_session_nr);
52974+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
52975
52976 if (target_id == ISCSI_MAX_TARGET) {
52977 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
52978@@ -4511,7 +4511,7 @@ static __init int iscsi_transport_init(void)
52979 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
52980 ISCSI_TRANSPORT_VERSION);
52981
52982- atomic_set(&iscsi_session_nr, 0);
52983+ atomic_set_unchecked(&iscsi_session_nr, 0);
52984
52985 err = class_register(&iscsi_transport_class);
52986 if (err)
52987diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
52988index 13e8983..d306a68 100644
52989--- a/drivers/scsi/scsi_transport_srp.c
52990+++ b/drivers/scsi/scsi_transport_srp.c
52991@@ -36,7 +36,7 @@
52992 #include "scsi_transport_srp_internal.h"
52993
52994 struct srp_host_attrs {
52995- atomic_t next_port_id;
52996+ atomic_unchecked_t next_port_id;
52997 };
52998 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
52999
53000@@ -101,7 +101,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
53001 struct Scsi_Host *shost = dev_to_shost(dev);
53002 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
53003
53004- atomic_set(&srp_host->next_port_id, 0);
53005+ atomic_set_unchecked(&srp_host->next_port_id, 0);
53006 return 0;
53007 }
53008
53009@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
53010 rport_fast_io_fail_timedout);
53011 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
53012
53013- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
53014+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
53015 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
53016
53017 transport_setup_device(&rport->dev);
53018diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
53019index 6825eda..be470c4 100644
53020--- a/drivers/scsi/sd.c
53021+++ b/drivers/scsi/sd.c
53022@@ -2954,7 +2954,7 @@ static int sd_probe(struct device *dev)
53023 sdkp->disk = gd;
53024 sdkp->index = index;
53025 atomic_set(&sdkp->openers, 0);
53026- atomic_set(&sdkp->device->ioerr_cnt, 0);
53027+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
53028
53029 if (!sdp->request_queue->rq_timeout) {
53030 if (sdp->type != TYPE_MOD)
53031diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
53032index 53268aab..17c2764 100644
53033--- a/drivers/scsi/sg.c
53034+++ b/drivers/scsi/sg.c
53035@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
53036 sdp->disk->disk_name,
53037 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53038 NULL,
53039- (char *)arg);
53040+ (char __user *)arg);
53041 case BLKTRACESTART:
53042 return blk_trace_startstop(sdp->device->request_queue, 1);
53043 case BLKTRACESTOP:
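
Casting the sg ioctl argument to (char __user *) changes nothing at run time; __user is a sparse address-space annotation that lets checking builds (`make C=1`) catch direct dereference of user pointers. How the annotation compiles away outside such builds (accessor name invented):

#include <stdio.h>

#ifdef __CHECKER__
#define __user __attribute__((noderef, address_space(1)))
#else
#define __user /* plain build: annotation compiles away */
#endif

/* Stand-in for a copy_from_user-style accessor (name invented). */
static int demo_copy_from_user(char *dst, const char __user *src, unsigned n)
{
    /* real kernel code validates and copies; the demo just fails safely */
    (void)dst; (void)src; (void)n;
    return -1;
}

int main(void)
{
    char buf[8];
    const char __user *uptr = 0;     /* pretend user pointer */
    printf("%d\n", demo_copy_from_user(buf, uptr, sizeof(buf)));
    return 0;
}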
53044diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53045index d4f9670..d37b662 100644
53046--- a/drivers/spi/spi.c
53047+++ b/drivers/spi/spi.c
53048@@ -2204,7 +2204,7 @@ int spi_bus_unlock(struct spi_master *master)
53049 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53050
53051 /* portable code must never pass more than 32 bytes */
53052-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53053+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53054
53055 static u8 *buf;
53056
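
SPI_BUFSIZ gains the UL suffix because the kernel's max() is type-checked: it refuses (via a distinct-pointer-types warning) to mix an int 32 with an unsigned long SMP_CACHE_BYTES. A simplified version of that macro showing the mechanism (requires GNU C, as in the kernel):

#include <stdio.h>

#define max(x, y) ({                            \
    __typeof__(x) _x = (x);                     \
    __typeof__(y) _y = (y);                     \
    (void)(&_x == &_y);  /* warns unless the types match */ \
    _x > _y ? _x : _y; })

int main(void)
{
    unsigned long cache_bytes = 64;  /* stand-in for SMP_CACHE_BYTES */
    /* max(32, cache_bytes) would warn: comparing int* with unsigned long* */
    unsigned long bufsiz = max(32UL, cache_bytes);
    printf("%lu\n", bufsiz);
    return 0;
}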
53057diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53058index c341ac1..bf9799f 100644
53059--- a/drivers/staging/android/timed_output.c
53060+++ b/drivers/staging/android/timed_output.c
53061@@ -25,7 +25,7 @@
53062 #include "timed_output.h"
53063
53064 static struct class *timed_output_class;
53065-static atomic_t device_count;
53066+static atomic_unchecked_t device_count;
53067
53068 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53069 char *buf)
53070@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
53071 timed_output_class = class_create(THIS_MODULE, "timed_output");
53072 if (IS_ERR(timed_output_class))
53073 return PTR_ERR(timed_output_class);
53074- atomic_set(&device_count, 0);
53075+ atomic_set_unchecked(&device_count, 0);
53076 timed_output_class->dev_groups = timed_output_groups;
53077 }
53078
53079@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53080 if (ret < 0)
53081 return ret;
53082
53083- tdev->index = atomic_inc_return(&device_count);
53084+ tdev->index = atomic_inc_return_unchecked(&device_count);
53085 tdev->dev = device_create(timed_output_class, NULL,
53086 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53087 if (IS_ERR(tdev->dev))
53088diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53089index fe47cd3..19a1bd1 100644
53090--- a/drivers/staging/gdm724x/gdm_tty.c
53091+++ b/drivers/staging/gdm724x/gdm_tty.c
53092@@ -44,7 +44,7 @@
53093 #define gdm_tty_send_control(n, r, v, d, l) (\
53094 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53095
53096-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53097+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53098
53099 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53100 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53101diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
53102index def8280..e3fd96a 100644
53103--- a/drivers/staging/imx-drm/imx-drm-core.c
53104+++ b/drivers/staging/imx-drm/imx-drm-core.c
53105@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
53106 if (imxdrm->pipes >= MAX_CRTC)
53107 return -EINVAL;
53108
53109- if (imxdrm->drm->open_count)
53110+ if (local_read(&imxdrm->drm->open_count))
53111 return -EBUSY;
53112
53113 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
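open_count on struct drm_device is converted to local_t elsewhere in this patch, so readers such as imx-drm must go through local_read(). local_t is a counter that is atomic only with respect to the CPU it lives on, which is cheaper than atomic_t where cross-CPU consistency is not required. A sketch:

	#include <asm/local.h>

	static local_t open_count = LOCAL_INIT(0);

	static int first_open(void)
	{
		local_inc(&open_count);	/* atomic w.r.t. the local CPU only */
		return local_read(&open_count) == 1;
	}
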
53114diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53115index 3f8020c..649fded 100644
53116--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53117+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53118@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53119 return 0;
53120 }
53121
53122-sfw_test_client_ops_t brw_test_client;
53123-void brw_init_test_client(void)
53124-{
53125- brw_test_client.tso_init = brw_client_init;
53126- brw_test_client.tso_fini = brw_client_fini;
53127- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53128- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53129+sfw_test_client_ops_t brw_test_client = {
53130+ .tso_init = brw_client_init,
53131+ .tso_fini = brw_client_fini,
53132+ .tso_prep_rpc = brw_client_prep_rpc,
53133+ .tso_done_rpc = brw_client_done_rpc,
53134 };
53135
53136 srpc_service_t brw_test_service;
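Replacing the fill-in-at-init pattern with a designated initializer is what lets the constify plugin place such ops structures in read-only memory: every pointer is fixed at compile time and the init function disappears. The transformation in miniature:

	struct test_ops {
		int  (*init)(void);
		void (*fini)(void);
	};

	static int  demo_init(void) { return 0; }
	static void demo_fini(void) { }

	/* before: a mutable global patched field-by-field by an init
	 * function at runtime; after: fully initialized at compile time */
	static struct test_ops demo_ops = {
		.init = demo_init,
		.fini = demo_fini,
	};
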
53137diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53138index 050723a..fa6fdf1 100644
53139--- a/drivers/staging/lustre/lnet/selftest/framework.c
53140+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53141@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
53142
53143 extern sfw_test_client_ops_t ping_test_client;
53144 extern srpc_service_t ping_test_service;
53145-extern void ping_init_test_client(void);
53146 extern void ping_init_test_service(void);
53147
53148 extern sfw_test_client_ops_t brw_test_client;
53149 extern srpc_service_t brw_test_service;
53150-extern void brw_init_test_client(void);
53151 extern void brw_init_test_service(void);
53152
53153
53154@@ -1684,12 +1682,10 @@ sfw_startup (void)
53155 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53156 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53157
53158- brw_init_test_client();
53159 brw_init_test_service();
53160 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53161 LASSERT (rc == 0);
53162
53163- ping_init_test_client();
53164 ping_init_test_service();
53165 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53166 LASSERT (rc == 0);
53167diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53168index 750cac4..e4d751f 100644
53169--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53170+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53171@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53172 return 0;
53173 }
53174
53175-sfw_test_client_ops_t ping_test_client;
53176-void ping_init_test_client(void)
53177-{
53178- ping_test_client.tso_init = ping_client_init;
53179- ping_test_client.tso_fini = ping_client_fini;
53180- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53181- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53182-}
53183+sfw_test_client_ops_t ping_test_client = {
53184+ .tso_init = ping_client_init,
53185+ .tso_fini = ping_client_fini,
53186+ .tso_prep_rpc = ping_client_prep_rpc,
53187+ .tso_done_rpc = ping_client_done_rpc,
53188+};
53189
53190 srpc_service_t ping_test_service;
53191 void ping_init_test_service(void)
53192diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53193index 0c6b784..c64235c 100644
53194--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53195+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53196@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
53197 ldlm_completion_callback lcs_completion;
53198 ldlm_blocking_callback lcs_blocking;
53199 ldlm_glimpse_callback lcs_glimpse;
53200-};
53201+} __no_const;
53202
53203 /* ldlm_lockd.c */
53204 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
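__no_const is the opt-out for PaX's constify GCC plugin, which otherwise makes every struct composed of function pointers implicitly const. Structures like ldlm_callback_suite that are legitimately assembled at runtime (here, on the stack) must carry the annotation. As defined in this patch's compiler.h changes, approximately:

	#ifdef CONSTIFY_PLUGIN
	#define __no_const __attribute__((no_const))
	#else
	#define __no_const
	#endif

	struct callback_suite {
		int (*completion)(void *lock, int flags);
		int (*blocking)(void *lock, void *desc);
	} __no_const;	/* built at runtime; must stay writable */
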
53205diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53206index d5c4613..a341678 100644
53207--- a/drivers/staging/lustre/lustre/include/obd.h
53208+++ b/drivers/staging/lustre/lustre/include/obd.h
53209@@ -1439,7 +1439,7 @@ struct md_ops {
53210 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53211 * wrapper function in include/linux/obd_class.h.
53212 */
53213-};
53214+} __no_const;
53215
53216 struct lsm_operations {
53217 void (*lsm_free)(struct lov_stripe_md *);
53218diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53219index 986bf38..eab2558f 100644
53220--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53221+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53222@@ -259,7 +259,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53223 int added = (mode == LCK_NL);
53224 int overlaps = 0;
53225 int splitted = 0;
53226- const struct ldlm_callback_suite null_cbs = { NULL };
53227+ const struct ldlm_callback_suite null_cbs = { };
53228
53229 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
53230 LPU64" end "LPU64"\n", *flags,
53231diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53232index e947b91..f408990 100644
53233--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53234+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53235@@ -217,7 +217,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
53236 int LL_PROC_PROTO(proc_console_max_delay_cs)
53237 {
53238 int rc, max_delay_cs;
53239- ctl_table_t dummy = *table;
53240+ ctl_table_no_const dummy = *table;
53241 cfs_duration_t d;
53242
53243 dummy.data = &max_delay_cs;
53244@@ -248,7 +248,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
53245 int LL_PROC_PROTO(proc_console_min_delay_cs)
53246 {
53247 int rc, min_delay_cs;
53248- ctl_table_t dummy = *table;
53249+ ctl_table_no_const dummy = *table;
53250 cfs_duration_t d;
53251
53252 dummy.data = &min_delay_cs;
53253@@ -279,7 +279,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
53254 int LL_PROC_PROTO(proc_console_backoff)
53255 {
53256 int rc, backoff;
53257- ctl_table_t dummy = *table;
53258+ ctl_table_no_const dummy = *table;
53259
53260 dummy.data = &backoff;
53261 dummy.proc_handler = &proc_dointvec;
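ctl_table_no_const is the sysctl flavor of the same opt-out: under the constify plugin ctl_table becomes const, so a handler that wants a writable stack copy of its table needs the non-const typedef (it is plain struct ctl_table on unpatched trees). A sketch of the redirect-to-a-local pattern these lustre handlers use:

	#include <linux/sysctl.h>

	static int clamp_handler(struct ctl_table *table, int write,
				 void __user *buf, size_t *lenp, loff_t *ppos)
	{
		int val;
		ctl_table_no_const dummy = *table;	/* writable copy */

		dummy.data = &val;			/* point at a local */
		dummy.maxlen = sizeof(val);
		return proc_dointvec(&dummy, write, buf, lenp, ppos);
	}
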
53262diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53263index b16ee08..a3db5c6 100644
53264--- a/drivers/staging/lustre/lustre/libcfs/module.c
53265+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53266@@ -314,11 +314,11 @@ out:
53267
53268
53269 struct cfs_psdev_ops libcfs_psdev_ops = {
53270- libcfs_psdev_open,
53271- libcfs_psdev_release,
53272- NULL,
53273- NULL,
53274- libcfs_ioctl
53275+ .p_open = libcfs_psdev_open,
53276+ .p_close = libcfs_psdev_release,
53277+ .p_read = NULL,
53278+ .p_write = NULL,
53279+ .p_ioctl = libcfs_ioctl
53280 };
53281
53282 extern int insert_proc(void);
53283diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
53284index ae6f61a..03c3d5d 100644
53285--- a/drivers/staging/lustre/lustre/llite/dir.c
53286+++ b/drivers/staging/lustre/lustre/llite/dir.c
53287@@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
53288 int mode;
53289 int err;
53290
53291- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
53292+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
53293 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
53294 strlen(filename), mode, LUSTRE_OPC_MKDIR,
53295 lump);
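current_umask() is the sanctioned accessor for the caller's umask; open-coding current->fs->umask reaches through fs_struct directly and bypasses the helper the rest of the VFS uses. An equivalent call site, assuming only <linux/fs.h>:

	#include <linux/fs.h>
	#include <linux/stat.h>

	/* mask the requested mode with the caller's umask, as mkdir does */
	static umode_t dir_create_mode(void)
	{
		return (0755 & (S_IRWXUGO | S_ISVTX) & ~current_umask()) | S_IFDIR;
	}
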
53296diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
53297index f670469..03b7438 100644
53298--- a/drivers/staging/media/solo6x10/solo6x10-core.c
53299+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
53300@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
53301
53302 static int solo_sysfs_init(struct solo_dev *solo_dev)
53303 {
53304- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
53305+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
53306 struct device *dev = &solo_dev->dev;
53307 const char *driver;
53308 int i;
53309diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
53310index 74f037b..5b5bb76 100644
53311--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
53312+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
53313@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
53314
53315 int solo_g723_init(struct solo_dev *solo_dev)
53316 {
53317- static struct snd_device_ops ops = { NULL };
53318+ static struct snd_device_ops ops = { };
53319 struct snd_card *card;
53320 struct snd_kcontrol_new kctl;
53321 char name[32];
53322diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
53323index 7f2f247..d999137 100644
53324--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
53325+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
53326@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
53327
53328 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
53329 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
53330- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
53331+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
53332 if (p2m_id < 0)
53333 p2m_id = -p2m_id;
53334 }
53335diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
53336index 8964f8b..36eb087 100644
53337--- a/drivers/staging/media/solo6x10/solo6x10.h
53338+++ b/drivers/staging/media/solo6x10/solo6x10.h
53339@@ -237,7 +237,7 @@ struct solo_dev {
53340
53341 /* P2M DMA Engine */
53342 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
53343- atomic_t p2m_count;
53344+ atomic_unchecked_t p2m_count;
53345 int p2m_jiffies;
53346 unsigned int p2m_timeouts;
53347
53348diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53349index a0f4868..139f1fb 100644
53350--- a/drivers/staging/octeon/ethernet-rx.c
53351+++ b/drivers/staging/octeon/ethernet-rx.c
53352@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53353 /* Increment RX stats for virtual ports */
53354 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53355 #ifdef CONFIG_64BIT
53356- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
53357- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
53358+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
53359+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
53360 #else
53361- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
53362- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
53363+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
53364+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
53365 #endif
53366 }
53367 netif_receive_skb(skb);
53368@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53369 dev->name);
53370 */
53371 #ifdef CONFIG_64BIT
53372- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
53373+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53374 #else
53375- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
53376+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
53377 #endif
53378 dev_kfree_skb_irq(skb);
53379 }
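The octeon driver bumps plain unsigned long fields of net_device_stats by casting them to atomic types; since statistics are expected to wrap, the casts must target the unchecked variants or REFCOUNT would trap on overflow. The pre-existing (and admittedly fragile) idiom being preserved:

	#include <linux/atomic.h>

	struct rx_stats { unsigned long rx_packets; };

	static void bump_rx(struct rx_stats *s)
	{
	#ifdef CONFIG_64BIT
		/* relies on unsigned long and atomic64_t sharing a size;
		 * the _unchecked form exempts the expected wraparound */
		atomic64_add_unchecked(1, (atomic64_unchecked_t *)&s->rx_packets);
	#else
		atomic_add_unchecked(1, (atomic_unchecked_t *)&s->rx_packets);
	#endif
	}
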
53380diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53381index da9dd6b..8e3e0f5 100644
53382--- a/drivers/staging/octeon/ethernet.c
53383+++ b/drivers/staging/octeon/ethernet.c
53384@@ -247,11 +247,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53385 * since the RX tasklet also increments it.
53386 */
53387 #ifdef CONFIG_64BIT
53388- atomic64_add(rx_status.dropped_packets,
53389- (atomic64_t *)&priv->stats.rx_dropped);
53390+ atomic64_add_unchecked(rx_status.dropped_packets,
53391+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53392 #else
53393- atomic_add(rx_status.dropped_packets,
53394- (atomic_t *)&priv->stats.rx_dropped);
53395+ atomic_add_unchecked(rx_status.dropped_packets,
53396+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53397 #endif
53398 }
53399
53400diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53401index c59fccd..79f8fc2 100644
53402--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53403+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53404@@ -267,7 +267,7 @@ struct hal_ops {
53405 s32 (*c2h_handler)(struct adapter *padapter,
53406 struct c2h_evt_hdr *c2h_evt);
53407 c2h_id_filter c2h_id_filter_ccx;
53408-};
53409+} __no_const;
53410
53411 enum rt_eeprom_type {
53412 EEPROM_93C46,
53413diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
53414index e8790f8..b4a5980 100644
53415--- a/drivers/staging/rtl8188eu/include/rtw_io.h
53416+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
53417@@ -124,7 +124,7 @@ struct _io_ops {
53418 u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
53419 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
53420 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
53421-};
53422+} __no_const;
53423
53424 struct io_req {
53425 struct list_head list;
53426diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53427index dc23395..cf7e9b1 100644
53428--- a/drivers/staging/rtl8712/rtl871x_io.h
53429+++ b/drivers/staging/rtl8712/rtl871x_io.h
53430@@ -108,7 +108,7 @@ struct _io_ops {
53431 u8 *pmem);
53432 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53433 u8 *pmem);
53434-};
53435+} __no_const;
53436
53437 struct io_req {
53438 struct list_head list;
53439diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
53440index a863a98..d272795 100644
53441--- a/drivers/staging/usbip/vhci.h
53442+++ b/drivers/staging/usbip/vhci.h
53443@@ -83,7 +83,7 @@ struct vhci_hcd {
53444 unsigned resuming:1;
53445 unsigned long re_timeout;
53446
53447- atomic_t seqnum;
53448+ atomic_unchecked_t seqnum;
53449
53450 /*
53451 * NOTE:
53452diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
53453index 0007d30..c06a693 100644
53454--- a/drivers/staging/usbip/vhci_hcd.c
53455+++ b/drivers/staging/usbip/vhci_hcd.c
53456@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
53457
53458 spin_lock(&vdev->priv_lock);
53459
53460- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
53461+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
53462 if (priv->seqnum == 0xffff)
53463 dev_info(&urb->dev->dev, "seqnum max\n");
53464
53465@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
53466 return -ENOMEM;
53467 }
53468
53469- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
53470+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
53471 if (unlink->seqnum == 0xffff)
53472 pr_info("seqnum max\n");
53473
53474@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
53475 vdev->rhport = rhport;
53476 }
53477
53478- atomic_set(&vhci->seqnum, 0);
53479+ atomic_set_unchecked(&vhci->seqnum, 0);
53480 spin_lock_init(&vhci->lock);
53481
53482 hcd->power_budget = 0; /* no limit */
53483diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
53484index d07fcb5..358e1e1 100644
53485--- a/drivers/staging/usbip/vhci_rx.c
53486+++ b/drivers/staging/usbip/vhci_rx.c
53487@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
53488 if (!urb) {
53489 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
53490 pr_info("max seqnum %d\n",
53491- atomic_read(&the_controller->seqnum));
53492+ atomic_read_unchecked(&the_controller->seqnum));
53493 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
53494 return;
53495 }
53496diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
53497index 317c2a8..ffeb4ef 100644
53498--- a/drivers/staging/vt6655/hostap.c
53499+++ b/drivers/staging/vt6655/hostap.c
53500@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
53501 *
53502 */
53503
53504+static net_device_ops_no_const apdev_netdev_ops;
53505+
53506 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53507 {
53508 PSDevice apdev_priv;
53509 struct net_device *dev = pDevice->dev;
53510 int ret;
53511- const struct net_device_ops apdev_netdev_ops = {
53512- .ndo_start_xmit = pDevice->tx_80211,
53513- };
53514
53515 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
53516
53517@@ -87,6 +86,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53518 *apdev_priv = *pDevice;
53519 eth_hw_addr_inherit(pDevice->apdev, dev);
53520
53521+ /* only half broken now: ndo_start_xmit still has to be patched in at runtime */
53522+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
53523 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
53524
53525 pDevice->apdev->type = ARPHRD_IEEE80211;
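vt6655 cannot build its net_device_ops at compile time because ndo_start_xmit comes from the device instance, so the ops move to a writable static of the net_device_ops_no_const type this patch adds to netdevice.h, and the single dynamic field is filled in before the struct is installed. The shape of it:

	#include <linux/netdevice.h>

	/* deliberately writable: one hook is only known at runtime */
	static net_device_ops_no_const apdev_netdev_ops;

	static void apdev_bind(struct net_device *apdev,
			       netdev_tx_t (*xmit)(struct sk_buff *,
						   struct net_device *))
	{
		apdev_netdev_ops.ndo_start_xmit = xmit;
		apdev->netdev_ops = &apdev_netdev_ops;
	}
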
53526diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53527index e7e9372..161f530 100644
53528--- a/drivers/target/sbp/sbp_target.c
53529+++ b/drivers/target/sbp/sbp_target.c
53530@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53531
53532 #define SESSION_MAINTENANCE_INTERVAL HZ
53533
53534-static atomic_t login_id = ATOMIC_INIT(0);
53535+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53536
53537 static void session_maintenance_work(struct work_struct *);
53538 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53539@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53540 login->lun = se_lun;
53541 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53542 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53543- login->login_id = atomic_inc_return(&login_id);
53544+ login->login_id = atomic_inc_return_unchecked(&login_id);
53545
53546 login->tgt_agt = sbp_target_agent_register(login);
53547 if (IS_ERR(login->tgt_agt)) {
53548diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53549index 98da901..bb443e8 100644
53550--- a/drivers/target/target_core_device.c
53551+++ b/drivers/target/target_core_device.c
53552@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53553 spin_lock_init(&dev->se_tmr_lock);
53554 spin_lock_init(&dev->qf_cmd_lock);
53555 sema_init(&dev->caw_sem, 1);
53556- atomic_set(&dev->dev_ordered_id, 0);
53557+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53558 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53559 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53560 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53561diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53562index 7fa62fc..abdd041 100644
53563--- a/drivers/target/target_core_transport.c
53564+++ b/drivers/target/target_core_transport.c
53565@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53566 * Used to determine when ORDERED commands should go from
53567 * Dormant to Active status.
53568 */
53569- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53570+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53571 smp_mb__after_atomic();
53572 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53573 cmd->se_ordered_id, cmd->sam_task_attr,
53574diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53575index 4b2b999..cad9fa5 100644
53576--- a/drivers/thermal/of-thermal.c
53577+++ b/drivers/thermal/of-thermal.c
53578@@ -30,6 +30,7 @@
53579 #include <linux/err.h>
53580 #include <linux/export.h>
53581 #include <linux/string.h>
53582+#include <linux/mm.h>
53583
53584 #include "thermal_core.h"
53585
53586@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53587 tz->get_trend = get_trend;
53588 tz->sensor_data = data;
53589
53590- tzd->ops->get_temp = of_thermal_get_temp;
53591- tzd->ops->get_trend = of_thermal_get_trend;
53592+ pax_open_kernel();
53593+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53594+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53595+ pax_close_kernel();
53596 mutex_unlock(&tzd->lock);
53597
53598 return tzd;
53599@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53600 return;
53601
53602 mutex_lock(&tzd->lock);
53603- tzd->ops->get_temp = NULL;
53604- tzd->ops->get_trend = NULL;
53605+ pax_open_kernel();
53606+ *(void **)&tzd->ops->get_temp = NULL;
53607+ *(void **)&tzd->ops->get_trend = NULL;
53608+ pax_close_kernel();
53609
53610 tz->get_temp = NULL;
53611 tz->get_trend = NULL;
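When a const (hence read-only-mapped) structure genuinely must be written, the patch brackets the store with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86, by toggling CR0.WP), and launders the const away through a void ** cast so the compiler accepts the store. The idiom, assuming those PaX primitives (no-ops on an unpatched tree):

	struct tz_ops {
		int (*get_temp)(void *tz, unsigned long *t);
	};

	static int stub_get_temp(void *tz, unsigned long *t)
	{
		*t = 0;
		return 0;
	}

	static const struct tz_ops ops;		/* sits in rodata */

	static void hook_get_temp(void)
	{
		pax_open_kernel();		/* rodata writable, briefly */
		*(void **)&ops.get_temp = stub_get_temp;
		pax_close_kernel();		/* protection restored */
	}
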
53612diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53613index a57bb5a..1f727d33 100644
53614--- a/drivers/tty/cyclades.c
53615+++ b/drivers/tty/cyclades.c
53616@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53617 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53618 info->port.count);
53619 #endif
53620- info->port.count++;
53621+ atomic_inc(&info->port.count);
53622 #ifdef CY_DEBUG_COUNT
53623 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53624- current->pid, info->port.count);
53625+ current->pid, atomic_read(&info->port.count));
53626 #endif
53627
53628 /*
53629@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53630 for (j = 0; j < cy_card[i].nports; j++) {
53631 info = &cy_card[i].ports[j];
53632
53633- if (info->port.count) {
53634+ if (atomic_read(&info->port.count)) {
53635 /* XXX is the ldisc num worth this? */
53636 struct tty_struct *tty;
53637 struct tty_ldisc *ld;
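The long run of tty changes from here on is one mechanical conversion: struct tty_port's count becomes atomic_t (in the tty header elsewhere in this patch), so every count++/count--/plain read turns into atomic_inc()/atomic_dec()/atomic_read(), and check-then-increment sequences collapse into a single atomic_inc_return() test. The recurring before/after shape:

	#include <linux/atomic.h>

	struct port { atomic_t count; };

	static int port_open(struct port *p)
	{
		/* was: if (p->count++ > 0) -- now one read-modify-write */
		if (atomic_inc_return(&p->count) > 1)
			return 0;	/* fast path: already open */
		return 1;		/* first opener initializes */
	}

	static int port_last_close(struct port *p)
	{
		/* was: if (--p->count == 0) */
		return atomic_dec_return(&p->count) == 0;
	}
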
53638diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
53639index 4fcec1d..5a036f7 100644
53640--- a/drivers/tty/hvc/hvc_console.c
53641+++ b/drivers/tty/hvc/hvc_console.c
53642@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
53643
53644 spin_lock_irqsave(&hp->port.lock, flags);
53645 /* Check and then increment for fast path open. */
53646- if (hp->port.count++ > 0) {
53647+ if (atomic_inc_return(&hp->port.count) > 1) {
53648 spin_unlock_irqrestore(&hp->port.lock, flags);
53649 hvc_kick();
53650 return 0;
53651@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53652
53653 spin_lock_irqsave(&hp->port.lock, flags);
53654
53655- if (--hp->port.count == 0) {
53656+ if (atomic_dec_return(&hp->port.count) == 0) {
53657 spin_unlock_irqrestore(&hp->port.lock, flags);
53658 /* We are done with the tty pointer now. */
53659 tty_port_tty_set(&hp->port, NULL);
53660@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53661 */
53662 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
53663 } else {
53664- if (hp->port.count < 0)
53665+ if (atomic_read(&hp->port.count) < 0)
53666 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
53667- hp->vtermno, hp->port.count);
53668+ hp->vtermno, atomic_read(&hp->port.count));
53669 spin_unlock_irqrestore(&hp->port.lock, flags);
53670 }
53671 }
53672@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
53673 * open->hangup case this can be called after the final close so prevent
53674 * that from happening for now.
53675 */
53676- if (hp->port.count <= 0) {
53677+ if (atomic_read(&hp->port.count) <= 0) {
53678 spin_unlock_irqrestore(&hp->port.lock, flags);
53679 return;
53680 }
53681
53682- hp->port.count = 0;
53683+ atomic_set(&hp->port.count, 0);
53684 spin_unlock_irqrestore(&hp->port.lock, flags);
53685 tty_port_tty_set(&hp->port, NULL);
53686
53687@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
53688 return -EPIPE;
53689
53690 /* FIXME what's this (unprotected) check for? */
53691- if (hp->port.count <= 0)
53692+ if (atomic_read(&hp->port.count) <= 0)
53693 return -EIO;
53694
53695 spin_lock_irqsave(&hp->lock, flags);
53696diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
53697index 81e939e..95ead10 100644
53698--- a/drivers/tty/hvc/hvcs.c
53699+++ b/drivers/tty/hvc/hvcs.c
53700@@ -83,6 +83,7 @@
53701 #include <asm/hvcserver.h>
53702 #include <asm/uaccess.h>
53703 #include <asm/vio.h>
53704+#include <asm/local.h>
53705
53706 /*
53707 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
53708@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
53709
53710 spin_lock_irqsave(&hvcsd->lock, flags);
53711
53712- if (hvcsd->port.count > 0) {
53713+ if (atomic_read(&hvcsd->port.count) > 0) {
53714 spin_unlock_irqrestore(&hvcsd->lock, flags);
53715 printk(KERN_INFO "HVCS: vterm state unchanged. "
53716 "The hvcs device node is still in use.\n");
53717@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
53718 }
53719 }
53720
53721- hvcsd->port.count = 0;
53722+ atomic_set(&hvcsd->port.count, 0);
53723 hvcsd->port.tty = tty;
53724 tty->driver_data = hvcsd;
53725
53726@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
53727 unsigned long flags;
53728
53729 spin_lock_irqsave(&hvcsd->lock, flags);
53730- hvcsd->port.count++;
53731+ atomic_inc(&hvcsd->port.count);
53732 hvcsd->todo_mask |= HVCS_SCHED_READ;
53733 spin_unlock_irqrestore(&hvcsd->lock, flags);
53734
53735@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53736 hvcsd = tty->driver_data;
53737
53738 spin_lock_irqsave(&hvcsd->lock, flags);
53739- if (--hvcsd->port.count == 0) {
53740+ if (atomic_dec_and_test(&hvcsd->port.count)) {
53741
53742 vio_disable_interrupts(hvcsd->vdev);
53743
53744@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53745
53746 free_irq(irq, hvcsd);
53747 return;
53748- } else if (hvcsd->port.count < 0) {
53749+ } else if (atomic_read(&hvcsd->port.count) < 0) {
53750 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
53751 " is missmanaged.\n",
53752- hvcsd->vdev->unit_address, hvcsd->port.count);
53753+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
53754 }
53755
53756 spin_unlock_irqrestore(&hvcsd->lock, flags);
53757@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53758
53759 spin_lock_irqsave(&hvcsd->lock, flags);
53760 /* Preserve this so that we know how many kref refs to put */
53761- temp_open_count = hvcsd->port.count;
53762+ temp_open_count = atomic_read(&hvcsd->port.count);
53763
53764 /*
53765 * Don't kref put inside the spinlock because the destruction
53766@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53767 tty->driver_data = NULL;
53768 hvcsd->port.tty = NULL;
53769
53770- hvcsd->port.count = 0;
53771+ atomic_set(&hvcsd->port.count, 0);
53772
53773 /* This will drop any buffered data on the floor which is OK in a hangup
53774 * scenario. */
53775@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
53776 * the middle of a write operation? This is a crummy place to do this
53777 * but we want to keep it all in the spinlock.
53778 */
53779- if (hvcsd->port.count <= 0) {
53780+ if (atomic_read(&hvcsd->port.count) <= 0) {
53781 spin_unlock_irqrestore(&hvcsd->lock, flags);
53782 return -ENODEV;
53783 }
53784@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
53785 {
53786 struct hvcs_struct *hvcsd = tty->driver_data;
53787
53788- if (!hvcsd || hvcsd->port.count <= 0)
53789+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
53790 return 0;
53791
53792 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
53793diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
53794index 4190199..06d5bfa 100644
53795--- a/drivers/tty/hvc/hvsi.c
53796+++ b/drivers/tty/hvc/hvsi.c
53797@@ -85,7 +85,7 @@ struct hvsi_struct {
53798 int n_outbuf;
53799 uint32_t vtermno;
53800 uint32_t virq;
53801- atomic_t seqno; /* HVSI packet sequence number */
53802+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
53803 uint16_t mctrl;
53804 uint8_t state; /* HVSI protocol state */
53805 uint8_t flags;
53806@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
53807
53808 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
53809 packet.hdr.len = sizeof(struct hvsi_query_response);
53810- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53811+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53812 packet.verb = VSV_SEND_VERSION_NUMBER;
53813 packet.u.version = HVSI_VERSION;
53814 packet.query_seqno = query_seqno+1;
53815@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
53816
53817 packet.hdr.type = VS_QUERY_PACKET_HEADER;
53818 packet.hdr.len = sizeof(struct hvsi_query);
53819- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53820+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53821 packet.verb = verb;
53822
53823 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
53824@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
53825 int wrote;
53826
53827 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
53828- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53829+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53830 packet.hdr.len = sizeof(struct hvsi_control);
53831 packet.verb = VSV_SET_MODEM_CTL;
53832 packet.mask = HVSI_TSDTR;
53833@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
53834 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
53835
53836 packet.hdr.type = VS_DATA_PACKET_HEADER;
53837- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53838+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53839 packet.hdr.len = count + sizeof(struct hvsi_header);
53840 memcpy(&packet.data, buf, count);
53841
53842@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
53843 struct hvsi_control packet __ALIGNED__;
53844
53845 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
53846- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53847+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53848 packet.hdr.len = 6;
53849 packet.verb = VSV_CLOSE_PROTOCOL;
53850
53851@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53852
53853 tty_port_tty_set(&hp->port, tty);
53854 spin_lock_irqsave(&hp->lock, flags);
53855- hp->port.count++;
53856+ atomic_inc(&hp->port.count);
53857-	atomic_set(&hp->seqno, 0);
53857+	atomic_set_unchecked(&hp->seqno, 0);
53858 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53859 spin_unlock_irqrestore(&hp->lock, flags);
53860@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53861
53862 spin_lock_irqsave(&hp->lock, flags);
53863
53864- if (--hp->port.count == 0) {
53865+ if (atomic_dec_return(&hp->port.count) == 0) {
53866 tty_port_tty_set(&hp->port, NULL);
53867 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53868
53869@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53870
53871 spin_lock_irqsave(&hp->lock, flags);
53872 }
53873- } else if (hp->port.count < 0)
53874+ } else if (atomic_read(&hp->port.count) < 0)
53875 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53876- hp - hvsi_ports, hp->port.count);
53877+ hp - hvsi_ports, atomic_read(&hp->port.count));
53878
53879 spin_unlock_irqrestore(&hp->lock, flags);
53880 }
53881@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53882 tty_port_tty_set(&hp->port, NULL);
53883
53884 spin_lock_irqsave(&hp->lock, flags);
53885- hp->port.count = 0;
53886+ atomic_set(&hp->port.count, 0);
53887 hp->n_outbuf = 0;
53888 spin_unlock_irqrestore(&hp->lock, flags);
53889 }
53890diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53891index 7ae6c29..05c6dba 100644
53892--- a/drivers/tty/hvc/hvsi_lib.c
53893+++ b/drivers/tty/hvc/hvsi_lib.c
53894@@ -8,7 +8,7 @@
53895
53896 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53897 {
53898- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53899+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53900
53901 /* Assumes that always succeeds, works in practice */
53902 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53903@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53904
53905 /* Reset state */
53906 pv->established = 0;
53907- atomic_set(&pv->seqno, 0);
53908+ atomic_set_unchecked(&pv->seqno, 0);
53909
53910 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53911
53912diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53913index 17ee3bf..8d2520d 100644
53914--- a/drivers/tty/ipwireless/tty.c
53915+++ b/drivers/tty/ipwireless/tty.c
53916@@ -28,6 +28,7 @@
53917 #include <linux/tty_driver.h>
53918 #include <linux/tty_flip.h>
53919 #include <linux/uaccess.h>
53920+#include <asm/local.h>
53921
53922 #include "tty.h"
53923 #include "network.h"
53924@@ -98,10 +99,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53925 mutex_unlock(&tty->ipw_tty_mutex);
53926 return -ENODEV;
53927 }
53928- if (tty->port.count == 0)
53929+ if (atomic_read(&tty->port.count) == 0)
53930 tty->tx_bytes_queued = 0;
53931
53932- tty->port.count++;
53933+ atomic_inc(&tty->port.count);
53934
53935 tty->port.tty = linux_tty;
53936 linux_tty->driver_data = tty;
53937@@ -117,9 +118,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53938
53939 static void do_ipw_close(struct ipw_tty *tty)
53940 {
53941- tty->port.count--;
53942-
53943- if (tty->port.count == 0) {
53944+ if (atomic_dec_return(&tty->port.count) == 0) {
53945 struct tty_struct *linux_tty = tty->port.tty;
53946
53947 if (linux_tty != NULL) {
53948@@ -140,7 +139,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53949 return;
53950
53951 mutex_lock(&tty->ipw_tty_mutex);
53952- if (tty->port.count == 0) {
53953+ if (atomic_read(&tty->port.count) == 0) {
53954 mutex_unlock(&tty->ipw_tty_mutex);
53955 return;
53956 }
53957@@ -163,7 +162,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53958
53959 mutex_lock(&tty->ipw_tty_mutex);
53960
53961- if (!tty->port.count) {
53962+ if (!atomic_read(&tty->port.count)) {
53963 mutex_unlock(&tty->ipw_tty_mutex);
53964 return;
53965 }
53966@@ -202,7 +201,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53967 return -ENODEV;
53968
53969 mutex_lock(&tty->ipw_tty_mutex);
53970- if (!tty->port.count) {
53971+ if (!atomic_read(&tty->port.count)) {
53972 mutex_unlock(&tty->ipw_tty_mutex);
53973 return -EINVAL;
53974 }
53975@@ -242,7 +241,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
53976 if (!tty)
53977 return -ENODEV;
53978
53979- if (!tty->port.count)
53980+ if (!atomic_read(&tty->port.count))
53981 return -EINVAL;
53982
53983 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
53984@@ -284,7 +283,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
53985 if (!tty)
53986 return 0;
53987
53988- if (!tty->port.count)
53989+ if (!atomic_read(&tty->port.count))
53990 return 0;
53991
53992 return tty->tx_bytes_queued;
53993@@ -365,7 +364,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
53994 if (!tty)
53995 return -ENODEV;
53996
53997- if (!tty->port.count)
53998+ if (!atomic_read(&tty->port.count))
53999 return -EINVAL;
54000
54001 return get_control_lines(tty);
54002@@ -381,7 +380,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54003 if (!tty)
54004 return -ENODEV;
54005
54006- if (!tty->port.count)
54007+ if (!atomic_read(&tty->port.count))
54008 return -EINVAL;
54009
54010 return set_control_lines(tty, set, clear);
54011@@ -395,7 +394,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54012 if (!tty)
54013 return -ENODEV;
54014
54015- if (!tty->port.count)
54016+ if (!atomic_read(&tty->port.count))
54017 return -EINVAL;
54018
54019 /* FIXME: Exactly how is the tty object locked here .. */
54020@@ -551,7 +550,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54021 * are gone */
54022 mutex_lock(&ttyj->ipw_tty_mutex);
54023 }
54024- while (ttyj->port.count)
54025+ while (atomic_read(&ttyj->port.count))
54026 do_ipw_close(ttyj);
54027 ipwireless_disassociate_network_ttys(network,
54028 ttyj->channel_idx);
54029diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54030index 1deaca4..c8582d4 100644
54031--- a/drivers/tty/moxa.c
54032+++ b/drivers/tty/moxa.c
54033@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54034 }
54035
54036 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54037- ch->port.count++;
54038+ atomic_inc(&ch->port.count);
54039 tty->driver_data = ch;
54040 tty_port_tty_set(&ch->port, tty);
54041 mutex_lock(&ch->port.mutex);
54042diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54043index 2ebe47b..3205833 100644
54044--- a/drivers/tty/n_gsm.c
54045+++ b/drivers/tty/n_gsm.c
54046@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54047 spin_lock_init(&dlci->lock);
54048 mutex_init(&dlci->mutex);
54049 dlci->fifo = &dlci->_fifo;
54050- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54051+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54052 kfree(dlci);
54053 return NULL;
54054 }
54055@@ -2954,7 +2954,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54056 struct gsm_dlci *dlci = tty->driver_data;
54057 struct tty_port *port = &dlci->port;
54058
54059- port->count++;
54060+ atomic_inc(&port->count);
54061 tty_port_tty_set(port, tty);
54062
54063 dlci->modem_rx = 0;
54064diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54065index f44f1ba..a8d5915 100644
54066--- a/drivers/tty/n_tty.c
54067+++ b/drivers/tty/n_tty.c
54068@@ -115,7 +115,7 @@ struct n_tty_data {
54069 int minimum_to_wake;
54070
54071 /* consumer-published */
54072- size_t read_tail;
54073+ size_t read_tail __intentional_overflow(-1);
54074 size_t line_start;
54075
54076 /* protected by output lock */
54077@@ -2517,6 +2517,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54078 {
54079 *ops = tty_ldisc_N_TTY;
54080 ops->owner = NULL;
54081- ops->refcount = ops->flags = 0;
54082+ atomic_set(&ops->refcount, 0);
54083+ ops->flags = 0;
54084 }
54085 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54086diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54087index 25c9bc7..24077b7 100644
54088--- a/drivers/tty/pty.c
54089+++ b/drivers/tty/pty.c
54090@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
54091 panic("Couldn't register Unix98 pts driver");
54092
54093 /* Now create the /dev/ptmx special device */
54094+ pax_open_kernel();
54095 tty_default_fops(&ptmx_fops);
54096- ptmx_fops.open = ptmx_open;
54097+ *(void **)&ptmx_fops.open = ptmx_open;
54098+ pax_close_kernel();
54099
54100 cdev_init(&ptmx_cdev, &ptmx_fops);
54101 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54102diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54103index 383c4c7..d408e21 100644
54104--- a/drivers/tty/rocket.c
54105+++ b/drivers/tty/rocket.c
54106@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54107 tty->driver_data = info;
54108 tty_port_tty_set(port, tty);
54109
54110- if (port->count++ == 0) {
54111+ if (atomic_inc_return(&port->count) == 1) {
54112 atomic_inc(&rp_num_ports_open);
54113
54114 #ifdef ROCKET_DEBUG_OPEN
54115@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54116 #endif
54117 }
54118 #ifdef ROCKET_DEBUG_OPEN
54119- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54120+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
54121 #endif
54122
54123 /*
54124@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54125 spin_unlock_irqrestore(&info->port.lock, flags);
54126 return;
54127 }
54128- if (info->port.count)
54129+ if (atomic_read(&info->port.count))
54130 atomic_dec(&rp_num_ports_open);
54131 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54132 spin_unlock_irqrestore(&info->port.lock, flags);
54133diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54134index 1274499..f541382 100644
54135--- a/drivers/tty/serial/ioc4_serial.c
54136+++ b/drivers/tty/serial/ioc4_serial.c
54137@@ -437,7 +437,7 @@ struct ioc4_soft {
54138 } is_intr_info[MAX_IOC4_INTR_ENTS];
54139
54140 /* Number of entries active in the above array */
54141- atomic_t is_num_intrs;
54142+ atomic_unchecked_t is_num_intrs;
54143 } is_intr_type[IOC4_NUM_INTR_TYPES];
54144
54145 /* is_ir_lock must be held while
54146@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54147 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54148 || (type == IOC4_OTHER_INTR_TYPE)));
54149
54150- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54151+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54152 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54153
54154 /* Save off the lower level interrupt handler */
54155@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54156
54157 soft = arg;
54158 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54159- num_intrs = (int)atomic_read(
54160+ num_intrs = (int)atomic_read_unchecked(
54161 &soft->is_intr_type[intr_type].is_num_intrs);
54162
54163 this_mir = this_ir = pending_intrs(soft, intr_type);
54164diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54165index cfadf29..8cf4595 100644
54166--- a/drivers/tty/serial/kgdb_nmi.c
54167+++ b/drivers/tty/serial/kgdb_nmi.c
54168@@ -51,7 +51,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54169 * I/O utilities that messages sent to the console will automatically
54170 * be displayed on the dbg_io.
54171 */
54172- dbg_io_ops->is_console = true;
54173+ pax_open_kernel();
54174+ *(int *)&dbg_io_ops->is_console = true;
54175+ pax_close_kernel();
54176
54177 return 0;
54178 }
54179diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54180index a260cde..6b2b5ce 100644
54181--- a/drivers/tty/serial/kgdboc.c
54182+++ b/drivers/tty/serial/kgdboc.c
54183@@ -24,8 +24,9 @@
54184 #define MAX_CONFIG_LEN 40
54185
54186 static struct kgdb_io kgdboc_io_ops;
54187+static struct kgdb_io kgdboc_io_ops_console;
54188
54189-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54190+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54191 static int configured = -1;
54192
54193 static char config[MAX_CONFIG_LEN];
54194@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54195 kgdboc_unregister_kbd();
54196 if (configured == 1)
54197 kgdb_unregister_io_module(&kgdboc_io_ops);
54198+ else if (configured == 2)
54199+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54200 }
54201
54202 static int configure_kgdboc(void)
54203@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54204 int err;
54205 char *cptr = config;
54206 struct console *cons;
54207+ int is_console = 0;
54208
54209 err = kgdboc_option_setup(config);
54210 if (err || !strlen(config) || isspace(config[0]))
54211 goto noconfig;
54212
54213 err = -ENODEV;
54214- kgdboc_io_ops.is_console = 0;
54215 kgdb_tty_driver = NULL;
54216
54217 kgdboc_use_kms = 0;
54218@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54219 int idx;
54220 if (cons->device && cons->device(cons, &idx) == p &&
54221 idx == tty_line) {
54222- kgdboc_io_ops.is_console = 1;
54223+ is_console = 1;
54224 break;
54225 }
54226 cons = cons->next;
54227@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54228 kgdb_tty_line = tty_line;
54229
54230 do_register:
54231- err = kgdb_register_io_module(&kgdboc_io_ops);
54232+ if (is_console) {
54233+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54234+ configured = 2;
54235+ } else {
54236+ err = kgdb_register_io_module(&kgdboc_io_ops);
54237+ configured = 1;
54238+ }
54239 if (err)
54240 goto noconfig;
54241
54242@@ -205,8 +214,6 @@ do_register:
54243 if (err)
54244 goto nmi_con_failed;
54245
54246- configured = 1;
54247-
54248 return 0;
54249
54250 nmi_con_failed:
54251@@ -223,7 +230,7 @@ noconfig:
54252 static int __init init_kgdboc(void)
54253 {
54254 /* Already configured? */
54255- if (configured == 1)
54256+ if (configured >= 1)
54257 return 0;
54258
54259 return configure_kgdboc();
54260@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54261 if (config[len - 1] == '\n')
54262 config[len - 1] = '\0';
54263
54264- if (configured == 1)
54265+ if (configured >= 1)
54266 cleanup_kgdboc();
54267
54268 /* Go and configure with the new params. */
54269@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54270 .post_exception = kgdboc_post_exp_handler,
54271 };
54272
54273+static struct kgdb_io kgdboc_io_ops_console = {
54274+ .name = "kgdboc",
54275+ .read_char = kgdboc_get_char,
54276+ .write_char = kgdboc_put_char,
54277+ .pre_exception = kgdboc_pre_exp_handler,
54278+ .post_exception = kgdboc_post_exp_handler,
54279+ .is_console = 1
54280+};
54281+
54282 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54283 /* This is only available if kgdboc is a built in for early debugging */
54284 static int __init kgdboc_early_init(char *opt)
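Rather than flipping kgdboc_io_ops.is_console at runtime (a write into what the constify plugin wants read-only), the rework registers one of two fully static kgdb_io instances and records which in the configured state (1 vs 2), so cleanup unregisters the right one. The choice in miniature:

	#include <linux/kgdb.h>

	static struct kgdb_io io_ops         = { .name = "kgdboc" };
	static struct kgdb_io io_ops_console = { .name = "kgdboc",
						 .is_console = 1 };

	static int do_register(int is_console, int *configured)
	{
		struct kgdb_io *ops = is_console ? &io_ops_console : &io_ops;

		*configured = is_console ? 2 : 1;  /* for later unregister */
		return kgdb_register_io_module(ops);
	}
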
54285diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54286index 72000a6..a190bc4 100644
54287--- a/drivers/tty/serial/msm_serial.c
54288+++ b/drivers/tty/serial/msm_serial.c
54289@@ -981,7 +981,7 @@ static struct uart_driver msm_uart_driver = {
54290 .cons = MSM_CONSOLE,
54291 };
54292
54293-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54294+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54295
54296 static const struct of_device_id msm_uartdm_table[] = {
54297 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54298@@ -1000,7 +1000,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54299 int irq;
54300
54301 if (pdev->id == -1)
54302- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
54303+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54304
54305 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
54306 return -ENXIO;
54307diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54308index c1d3ebd..f618a93 100644
54309--- a/drivers/tty/serial/samsung.c
54310+++ b/drivers/tty/serial/samsung.c
54311@@ -486,11 +486,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54312 }
54313 }
54314
54315+static int s3c64xx_serial_startup(struct uart_port *port);
54316 static int s3c24xx_serial_startup(struct uart_port *port)
54317 {
54318 struct s3c24xx_uart_port *ourport = to_ourport(port);
54319 int ret;
54320
54321+ /* Startup sequence is different for s3c64xx and higher SoC's */
54322+ if (s3c24xx_serial_has_interrupt_mask(port))
54323+ return s3c64xx_serial_startup(port);
54324+
54325 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54326 port, (unsigned long long)port->mapbase, port->membase);
54327
54328@@ -1164,10 +1169,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54329 /* setup info for port */
54330 port->dev = &platdev->dev;
54331
54332- /* Startup sequence is different for s3c64xx and higher SoC's */
54333- if (s3c24xx_serial_has_interrupt_mask(port))
54334- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54335-
54336 port->uartclk = 1;
54337
54338 if (cfg->uart_flags & UPF_CONS_FLOW) {
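samsung.c used to patch s3c24xx_serial_ops.startup per-port at init time; the rework dispatches at the top of the common startup routine instead, so the ops table never needs a runtime write and can stay const. In miniature:

	struct uart_port_like { int has_irq_mask; };

	static int s3c64xx_startup(struct uart_port_like *port)
	{
		return 0;	/* stand-in for the 64xx init path */
	}

	static int s3c24xx_startup(struct uart_port_like *port)
	{
		/* newer SoCs branch here instead of the shared ops table
		 * being rewritten to point at a different .startup */
		if (port->has_irq_mask)
			return s3c64xx_startup(port);
		/* ... legacy s3c24xx init ... */
		return 0;
	}
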
54339diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54340index fbf6c5a..3939d92 100644
54341--- a/drivers/tty/serial/serial_core.c
54342+++ b/drivers/tty/serial/serial_core.c
54343@@ -1333,7 +1333,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54344
54345 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54346
54347- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54348+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54349 return;
54350
54351 /*
54352@@ -1460,7 +1460,7 @@ static void uart_hangup(struct tty_struct *tty)
54353 uart_flush_buffer(tty);
54354 uart_shutdown(tty, state);
54355 spin_lock_irqsave(&port->lock, flags);
54356- port->count = 0;
54357+ atomic_set(&port->count, 0);
54358 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54359 spin_unlock_irqrestore(&port->lock, flags);
54360 tty_port_tty_set(port, NULL);
54361@@ -1558,7 +1558,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54362 goto end;
54363 }
54364
54365- port->count++;
54366+ atomic_inc(&port->count);
54367 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
54368 retval = -ENXIO;
54369 goto err_dec_count;
54370@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54371 end:
54372 return retval;
54373 err_dec_count:
54374- port->count--;
54375+	atomic_dec(&port->count);
54376 mutex_unlock(&port->mutex);
54377 goto end;
54378 }
54379diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54380index d48e040..0f52764 100644
54381--- a/drivers/tty/synclink.c
54382+++ b/drivers/tty/synclink.c
54383@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54384
54385 if (debug_level >= DEBUG_LEVEL_INFO)
54386 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54387- __FILE__,__LINE__, info->device_name, info->port.count);
54388+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54389
54390 if (tty_port_close_start(&info->port, tty, filp) == 0)
54391 goto cleanup;
54392@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54393 cleanup:
54394 if (debug_level >= DEBUG_LEVEL_INFO)
54395 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54396- tty->driver->name, info->port.count);
54397+ tty->driver->name, atomic_read(&info->port.count));
54398
54399 } /* end of mgsl_close() */
54400
54401@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54402
54403 mgsl_flush_buffer(tty);
54404 shutdown(info);
54405-
54406- info->port.count = 0;
54407+
54408+ atomic_set(&info->port.count, 0);
54409 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54410 info->port.tty = NULL;
54411
54412@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54413
54414 if (debug_level >= DEBUG_LEVEL_INFO)
54415 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54416- __FILE__,__LINE__, tty->driver->name, port->count );
54417+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54418
54419 spin_lock_irqsave(&info->irq_spinlock, flags);
54420 if (!tty_hung_up_p(filp)) {
54421 extra_count = true;
54422- port->count--;
54423+ atomic_dec(&port->count);
54424 }
54425 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54426 port->blocked_open++;
54427@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54428
54429 if (debug_level >= DEBUG_LEVEL_INFO)
54430 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54431- __FILE__,__LINE__, tty->driver->name, port->count );
54432+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54433
54434 tty_unlock(tty);
54435 schedule();
54436@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54437
54438 /* FIXME: Racy on hangup during close wait */
54439 if (extra_count)
54440- port->count++;
54441+ atomic_inc(&port->count);
54442 port->blocked_open--;
54443
54444 if (debug_level >= DEBUG_LEVEL_INFO)
54445 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54446- __FILE__,__LINE__, tty->driver->name, port->count );
54447+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54448
54449 if (!retval)
54450 port->flags |= ASYNC_NORMAL_ACTIVE;
54451@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54452
54453 if (debug_level >= DEBUG_LEVEL_INFO)
54454 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54455- __FILE__,__LINE__,tty->driver->name, info->port.count);
54456+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54457
54458 /* If port is closing, signal caller to try again */
54459 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54460@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54461 spin_unlock_irqrestore(&info->netlock, flags);
54462 goto cleanup;
54463 }
54464- info->port.count++;
54465+ atomic_inc(&info->port.count);
54466 spin_unlock_irqrestore(&info->netlock, flags);
54467
54468- if (info->port.count == 1) {
54469+ if (atomic_read(&info->port.count) == 1) {
54470 /* 1st open on this device, init hardware */
54471 retval = startup(info);
54472 if (retval < 0)
54473@@ -3446,8 +3446,8 @@ cleanup:
54474 if (retval) {
54475 if (tty->count == 1)
54476 info->port.tty = NULL; /* tty layer will release tty struct */
54477- if(info->port.count)
54478- info->port.count--;
54479+ if (atomic_read(&info->port.count))
54480+ atomic_dec(&info->port.count);
54481 }
54482
54483 return retval;
54484@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54485 unsigned short new_crctype;
54486
54487 /* return error if TTY interface open */
54488- if (info->port.count)
54489+ if (atomic_read(&info->port.count))
54490 return -EBUSY;
54491
54492 switch (encoding)
54493@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
54494
54495 /* arbitrate between network and tty opens */
54496 spin_lock_irqsave(&info->netlock, flags);
54497- if (info->port.count != 0 || info->netcount != 0) {
54498+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54499 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54500 spin_unlock_irqrestore(&info->netlock, flags);
54501 return -EBUSY;
54502@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54503 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54504
54505 /* return error if TTY interface open */
54506- if (info->port.count)
54507+ if (atomic_read(&info->port.count))
54508 return -EBUSY;
54509
54510 if (cmd != SIOCWANDEV)
54511diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54512index c359a91..959fc26 100644
54513--- a/drivers/tty/synclink_gt.c
54514+++ b/drivers/tty/synclink_gt.c
54515@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54516 tty->driver_data = info;
54517 info->port.tty = tty;
54518
54519- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54520+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54521
54522 /* If port is closing, signal caller to try again */
54523 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54524@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54525 mutex_unlock(&info->port.mutex);
54526 goto cleanup;
54527 }
54528- info->port.count++;
54529+ atomic_inc(&info->port.count);
54530 spin_unlock_irqrestore(&info->netlock, flags);
54531
54532- if (info->port.count == 1) {
54533+ if (atomic_read(&info->port.count) == 1) {
54534 /* 1st open on this device, init hardware */
54535 retval = startup(info);
54536 if (retval < 0) {
54537@@ -715,8 +715,8 @@ cleanup:
54538 if (retval) {
54539 if (tty->count == 1)
54540 info->port.tty = NULL; /* tty layer will release tty struct */
54541- if(info->port.count)
54542- info->port.count--;
54543+ if(atomic_read(&info->port.count))
54544+ atomic_dec(&info->port.count);
54545 }
54546
54547 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54548@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54549
54550 if (sanity_check(info, tty->name, "close"))
54551 return;
54552- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54553+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54554
54555 if (tty_port_close_start(&info->port, tty, filp) == 0)
54556 goto cleanup;
54557@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54558 tty_port_close_end(&info->port, tty);
54559 info->port.tty = NULL;
54560 cleanup:
54561- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54562+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54563 }
54564
54565 static void hangup(struct tty_struct *tty)
54566@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54567 shutdown(info);
54568
54569 spin_lock_irqsave(&info->port.lock, flags);
54570- info->port.count = 0;
54571+ atomic_set(&info->port.count, 0);
54572 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54573 info->port.tty = NULL;
54574 spin_unlock_irqrestore(&info->port.lock, flags);
54575@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54576 unsigned short new_crctype;
54577
54578 /* return error if TTY interface open */
54579- if (info->port.count)
54580+ if (atomic_read(&info->port.count))
54581 return -EBUSY;
54582
54583 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54584@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54585
54586 /* arbitrate between network and tty opens */
54587 spin_lock_irqsave(&info->netlock, flags);
54588- if (info->port.count != 0 || info->netcount != 0) {
54589+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54590 DBGINFO(("%s hdlc_open busy\n", dev->name));
54591 spin_unlock_irqrestore(&info->netlock, flags);
54592 return -EBUSY;
54593@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54594 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54595
54596 /* return error if TTY interface open */
54597- if (info->port.count)
54598+ if (atomic_read(&info->port.count))
54599 return -EBUSY;
54600
54601 if (cmd != SIOCWANDEV)
54602@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54603 if (port == NULL)
54604 continue;
54605 spin_lock(&port->lock);
54606- if ((port->port.count || port->netcount) &&
54607+ if ((atomic_read(&port->port.count) || port->netcount) &&
54608 port->pending_bh && !port->bh_running &&
54609 !port->bh_requested) {
54610 DBGISR(("%s bh queued\n", port->device_name));
54611@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54612 spin_lock_irqsave(&info->lock, flags);
54613 if (!tty_hung_up_p(filp)) {
54614 extra_count = true;
54615- port->count--;
54616+ atomic_dec(&port->count);
54617 }
54618 spin_unlock_irqrestore(&info->lock, flags);
54619 port->blocked_open++;
54620@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54621 remove_wait_queue(&port->open_wait, &wait);
54622
54623 if (extra_count)
54624- port->count++;
54625+ atomic_inc(&port->count);
54626 port->blocked_open--;
54627
54628 if (!retval)
54629diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54630index 53ba853..3c30f6d 100644
54631--- a/drivers/tty/synclinkmp.c
54632+++ b/drivers/tty/synclinkmp.c
54633@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54634
54635 if (debug_level >= DEBUG_LEVEL_INFO)
54636 printk("%s(%d):%s open(), old ref count = %d\n",
54637- __FILE__,__LINE__,tty->driver->name, info->port.count);
54638+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54639
54640 /* If port is closing, signal caller to try again */
54641 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54642@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54643 spin_unlock_irqrestore(&info->netlock, flags);
54644 goto cleanup;
54645 }
54646- info->port.count++;
54647+ atomic_inc(&info->port.count);
54648 spin_unlock_irqrestore(&info->netlock, flags);
54649
54650- if (info->port.count == 1) {
54651+ if (atomic_read(&info->port.count) == 1) {
54652 /* 1st open on this device, init hardware */
54653 retval = startup(info);
54654 if (retval < 0)
54655@@ -796,8 +796,8 @@ cleanup:
54656 if (retval) {
54657 if (tty->count == 1)
54658 info->port.tty = NULL; /* tty layer will release tty struct */
54659- if(info->port.count)
54660- info->port.count--;
54661+ if(atomic_read(&info->port.count))
54662+ atomic_dec(&info->port.count);
54663 }
54664
54665 return retval;
54666@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54667
54668 if (debug_level >= DEBUG_LEVEL_INFO)
54669 printk("%s(%d):%s close() entry, count=%d\n",
54670- __FILE__,__LINE__, info->device_name, info->port.count);
54671+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54672
54673 if (tty_port_close_start(&info->port, tty, filp) == 0)
54674 goto cleanup;
54675@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54676 cleanup:
54677 if (debug_level >= DEBUG_LEVEL_INFO)
54678 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
54679- tty->driver->name, info->port.count);
54680+ tty->driver->name, atomic_read(&info->port.count));
54681 }
54682
54683 /* Called by tty_hangup() when a hangup is signaled.
54684@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
54685 shutdown(info);
54686
54687 spin_lock_irqsave(&info->port.lock, flags);
54688- info->port.count = 0;
54689+ atomic_set(&info->port.count, 0);
54690 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54691 info->port.tty = NULL;
54692 spin_unlock_irqrestore(&info->port.lock, flags);
54693@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54694 unsigned short new_crctype;
54695
54696 /* return error if TTY interface open */
54697- if (info->port.count)
54698+ if (atomic_read(&info->port.count))
54699 return -EBUSY;
54700
54701 switch (encoding)
54702@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
54703
54704 /* arbitrate between network and tty opens */
54705 spin_lock_irqsave(&info->netlock, flags);
54706- if (info->port.count != 0 || info->netcount != 0) {
54707+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54708 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54709 spin_unlock_irqrestore(&info->netlock, flags);
54710 return -EBUSY;
54711@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54712 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54713
54714 /* return error if TTY interface open */
54715- if (info->port.count)
54716+ if (atomic_read(&info->port.count))
54717 return -EBUSY;
54718
54719 if (cmd != SIOCWANDEV)
54720@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
54721 * do not request bottom half processing if the
54722 * device is not open in a normal mode.
54723 */
54724- if ( port && (port->port.count || port->netcount) &&
54725+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
54726 port->pending_bh && !port->bh_running &&
54727 !port->bh_requested ) {
54728 if ( debug_level >= DEBUG_LEVEL_ISR )
54729@@ -3319,12 +3319,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54730
54731 if (debug_level >= DEBUG_LEVEL_INFO)
54732 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
54733- __FILE__,__LINE__, tty->driver->name, port->count );
54734+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54735
54736 spin_lock_irqsave(&info->lock, flags);
54737 if (!tty_hung_up_p(filp)) {
54738 extra_count = true;
54739- port->count--;
54740+ atomic_dec(&port->count);
54741 }
54742 spin_unlock_irqrestore(&info->lock, flags);
54743 port->blocked_open++;
54744@@ -3353,7 +3353,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54745
54746 if (debug_level >= DEBUG_LEVEL_INFO)
54747 printk("%s(%d):%s block_til_ready() count=%d\n",
54748- __FILE__,__LINE__, tty->driver->name, port->count );
54749+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54750
54751 tty_unlock(tty);
54752 schedule();
54753@@ -3364,12 +3364,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54754 remove_wait_queue(&port->open_wait, &wait);
54755
54756 if (extra_count)
54757- port->count++;
54758+ atomic_inc(&port->count);
54759 port->blocked_open--;
54760
54761 if (debug_level >= DEBUG_LEVEL_INFO)
54762 printk("%s(%d):%s block_til_ready() after, count=%d\n",
54763- __FILE__,__LINE__, tty->driver->name, port->count );
54764+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54765
54766 if (!retval)
54767 port->flags |= ASYNC_NORMAL_ACTIVE;
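The synclink hunks above (and the tty_port.c ones below) convert the open reference count from a plain int to atomic_t, so concurrent open/close/hangup paths cannot lose updates and, under PaX REFCOUNT, an overflowing counter is detected rather than silently wrapping. A minimal userspace sketch of the lost-update problem using C11 atomics (illustrative only; the kernel's atomic_t API differs):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for tty_port.count before and after the conversion. */
static int plain_count;			/* racy read-modify-write */
static atomic_int atomic_count;		/* safe read-modify-write */

static void *opener(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		plain_count++;	/* may lose increments under contention */
		atomic_fetch_add_explicit(&atomic_count, 1,
					  memory_order_relaxed);
	}
	return NULL;
}

int main(void)	/* build: cc -pthread demo.c */
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, opener, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* plain_count typically falls short of 400000; atomic is exact. */
	printf("plain=%d atomic=%d\n", plain_count,
	       atomic_load(&atomic_count));
	return 0;
}

The if (atomic_dec_return(&port->count) < 0) form used in the tty_port_close_start hunk below is the atomic equivalent of the old if (--port->count < 0): decrement and test happen as one indivisible step.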
54768diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
54769index 454b658..57b1430 100644
54770--- a/drivers/tty/sysrq.c
54771+++ b/drivers/tty/sysrq.c
54772@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
54773 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
54774 size_t count, loff_t *ppos)
54775 {
54776- if (count) {
54777+ if (count && capable(CAP_SYS_ADMIN)) {
54778 char c;
54779
54780 if (get_user(c, buf))
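The sysrq hunk above adds a capable(CAP_SYS_ADMIN) gate to writes on /proc/sysrq-trigger, so the file's permission bits are no longer the only barrier. A hedged userspace analogue of the same capability test via libcap (the in-kernel capable() also involves LSM and namespace checks that this sketch does not):

/* build: cc cap_gate.c -lcap */
#include <stdio.h>
#include <stdlib.h>
#include <sys/capability.h>

/* Rough analogue of capable(CAP_SYS_ADMIN): is the capability present
 * in the caller's effective set? */
static int has_sys_admin(void)
{
	cap_t caps = cap_get_proc();
	cap_flag_value_t val = CAP_CLEAR;

	if (!caps)
		return 0;
	cap_get_flag(caps, CAP_SYS_ADMIN, CAP_EFFECTIVE, &val);
	cap_free(caps);
	return val == CAP_SET;
}

int main(void)
{
	if (!has_sys_admin()) {
		fprintf(stderr, "write ignored: CAP_SYS_ADMIN required\n");
		return EXIT_FAILURE;
	}
	puts("trigger accepted");
	return EXIT_SUCCESS;
}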
54781diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
54782index 3411071..86f2cf2 100644
54783--- a/drivers/tty/tty_io.c
54784+++ b/drivers/tty/tty_io.c
54785@@ -3475,7 +3475,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
54786
54787 void tty_default_fops(struct file_operations *fops)
54788 {
54789- *fops = tty_fops;
54790+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
54791 }
54792
54793 /*
54794diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
54795index 2d822aa..a566234 100644
54796--- a/drivers/tty/tty_ldisc.c
54797+++ b/drivers/tty/tty_ldisc.c
54798@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
54799 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54800 tty_ldiscs[disc] = new_ldisc;
54801 new_ldisc->num = disc;
54802- new_ldisc->refcount = 0;
54803+ atomic_set(&new_ldisc->refcount, 0);
54804 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54805
54806 return ret;
54807@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
54808 return -EINVAL;
54809
54810 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54811- if (tty_ldiscs[disc]->refcount)
54812+ if (atomic_read(&tty_ldiscs[disc]->refcount))
54813 ret = -EBUSY;
54814 else
54815 tty_ldiscs[disc] = NULL;
54816@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
54817 if (ldops) {
54818 ret = ERR_PTR(-EAGAIN);
54819 if (try_module_get(ldops->owner)) {
54820- ldops->refcount++;
54821+ atomic_inc(&ldops->refcount);
54822 ret = ldops;
54823 }
54824 }
54825@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
54826 unsigned long flags;
54827
54828 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54829- ldops->refcount--;
54830+ atomic_dec(&ldops->refcount);
54831 module_put(ldops->owner);
54832 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54833 }
54834diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
54835index 3f746c8..2f2fcaa 100644
54836--- a/drivers/tty/tty_port.c
54837+++ b/drivers/tty/tty_port.c
54838@@ -235,7 +235,7 @@ void tty_port_hangup(struct tty_port *port)
54839 unsigned long flags;
54840
54841 spin_lock_irqsave(&port->lock, flags);
54842- port->count = 0;
54843+ atomic_set(&port->count, 0);
54844 port->flags &= ~ASYNC_NORMAL_ACTIVE;
54845 tty = port->tty;
54846 if (tty)
54847@@ -393,7 +393,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54848 /* The port lock protects the port counts */
54849 spin_lock_irqsave(&port->lock, flags);
54850 if (!tty_hung_up_p(filp))
54851- port->count--;
54852+ atomic_dec(&port->count);
54853 port->blocked_open++;
54854 spin_unlock_irqrestore(&port->lock, flags);
54855
54856@@ -435,7 +435,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54857 we must not mess that up further */
54858 spin_lock_irqsave(&port->lock, flags);
54859 if (!tty_hung_up_p(filp))
54860- port->count++;
54861+ atomic_inc(&port->count);
54862 port->blocked_open--;
54863 if (retval == 0)
54864 port->flags |= ASYNC_NORMAL_ACTIVE;
54865@@ -469,19 +469,19 @@ int tty_port_close_start(struct tty_port *port,
54866 return 0;
54867 }
54868
54869- if (tty->count == 1 && port->count != 1) {
54870+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54871 printk(KERN_WARNING
54872 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54873- port->count);
54874- port->count = 1;
54875+ atomic_read(&port->count));
54876+ atomic_set(&port->count, 1);
54877 }
54878- if (--port->count < 0) {
54879+ if (atomic_dec_return(&port->count) < 0) {
54880 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54881- port->count);
54882- port->count = 0;
54883+ atomic_read(&port->count));
54884+ atomic_set(&port->count, 0);
54885 }
54886
54887- if (port->count) {
54888+ if (atomic_read(&port->count)) {
54889 spin_unlock_irqrestore(&port->lock, flags);
54890 return 0;
54891 }
54892@@ -563,7 +563,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54893 {
54894 spin_lock_irq(&port->lock);
54895 if (!tty_hung_up_p(filp))
54896- ++port->count;
54897+ atomic_inc(&port->count);
54898 spin_unlock_irq(&port->lock);
54899 tty_port_tty_set(port, tty);
54900
54901diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54902index d0e3a44..5f8b754 100644
54903--- a/drivers/tty/vt/keyboard.c
54904+++ b/drivers/tty/vt/keyboard.c
54905@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54906 kbd->kbdmode == VC_OFF) &&
54907 value != KVAL(K_SAK))
54908 return; /* SAK is allowed even in raw mode */
54909+
54910+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54911+ {
54912+ void *func = fn_handler[value];
54913+ if (func == fn_show_state || func == fn_show_ptregs ||
54914+ func == fn_show_mem)
54915+ return;
54916+ }
54917+#endif
54918+
54919 fn_handler[value](vc);
54920 }
54921
54922@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54923 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54924 return -EFAULT;
54925
54926- if (!capable(CAP_SYS_TTY_CONFIG))
54927- perm = 0;
54928-
54929 switch (cmd) {
54930 case KDGKBENT:
54931 /* Ensure another thread doesn't free it under us */
54932@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54933 spin_unlock_irqrestore(&kbd_event_lock, flags);
54934 return put_user(val, &user_kbe->kb_value);
54935 case KDSKBENT:
54936+ if (!capable(CAP_SYS_TTY_CONFIG))
54937+ perm = 0;
54938+
54939 if (!perm)
54940 return -EPERM;
54941 if (!i && v == K_NOSUCHMAP) {
54942@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54943 int i, j, k;
54944 int ret;
54945
54946- if (!capable(CAP_SYS_TTY_CONFIG))
54947- perm = 0;
54948-
54949 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54950 if (!kbs) {
54951 ret = -ENOMEM;
54952@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54953 kfree(kbs);
54954 return ((p && *p) ? -EOVERFLOW : 0);
54955 case KDSKBSENT:
54956+ if (!capable(CAP_SYS_TTY_CONFIG))
54957+ perm = 0;
54958+
54959 if (!perm) {
54960 ret = -EPERM;
54961 goto reterr;
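The keyboard.c hunks above move the capable(CAP_SYS_TTY_CONFIG) downgrade of perm out of the common path and into the KDSKBENT/KDSKBSENT (set) cases only, so unprivileged callers can still read keymap entries while writes stay gated. A toy dispatcher showing just the control-flow change (hypothetical names, illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum cmd { GET_ENTRY, SET_ENTRY };

static bool privileged;	/* stand-in for capable(CAP_SYS_TTY_CONFIG) */

static int do_ioctl(enum cmd cmd, bool perm)
{
	switch (cmd) {
	case GET_ENTRY:
		return 0;		/* reads need no capability */
	case SET_ENTRY:
		if (!privileged)	/* gate applies to mutation only */
			perm = false;
		if (!perm)
			return -1;	/* -EPERM in the kernel */
		return 0;
	}
	return -1;
}

int main(void)
{
	privileged = false;
	printf("get=%d set=%d\n",
	       do_ioctl(GET_ENTRY, true),	/*  0: allowed */
	       do_ioctl(SET_ENTRY, true));	/* -1: denied  */
	return 0;
}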
54962diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54963index a673e5b..36e5d32 100644
54964--- a/drivers/uio/uio.c
54965+++ b/drivers/uio/uio.c
54966@@ -25,6 +25,7 @@
54967 #include <linux/kobject.h>
54968 #include <linux/cdev.h>
54969 #include <linux/uio_driver.h>
54970+#include <asm/local.h>
54971
54972 #define UIO_MAX_DEVICES (1U << MINORBITS)
54973
54974@@ -32,7 +33,7 @@ struct uio_device {
54975 struct module *owner;
54976 struct device *dev;
54977 int minor;
54978- atomic_t event;
54979+ atomic_unchecked_t event;
54980 struct fasync_struct *async_queue;
54981 wait_queue_head_t wait;
54982 struct uio_info *info;
54983@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
54984 struct device_attribute *attr, char *buf)
54985 {
54986 struct uio_device *idev = dev_get_drvdata(dev);
54987- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
54988+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
54989 }
54990 static DEVICE_ATTR_RO(event);
54991
54992@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
54993 {
54994 struct uio_device *idev = info->uio_dev;
54995
54996- atomic_inc(&idev->event);
54997+ atomic_inc_unchecked(&idev->event);
54998 wake_up_interruptible(&idev->wait);
54999 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55000 }
55001@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55002 }
55003
55004 listener->dev = idev;
55005- listener->event_count = atomic_read(&idev->event);
55006+ listener->event_count = atomic_read_unchecked(&idev->event);
55007 filep->private_data = listener;
55008
55009 if (idev->info->open) {
55010@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55011 return -EIO;
55012
55013 poll_wait(filep, &idev->wait, wait);
55014- if (listener->event_count != atomic_read(&idev->event))
55015+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55016 return POLLIN | POLLRDNORM;
55017 return 0;
55018 }
55019@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55020 do {
55021 set_current_state(TASK_INTERRUPTIBLE);
55022
55023- event_count = atomic_read(&idev->event);
55024+ event_count = atomic_read_unchecked(&idev->event);
55025 if (event_count != listener->event_count) {
55026 if (copy_to_user(buf, &event_count, count))
55027 retval = -EFAULT;
55028@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55029 static int uio_find_mem_index(struct vm_area_struct *vma)
55030 {
55031 struct uio_device *idev = vma->vm_private_data;
55032+ unsigned long size;
55033
55034 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55035- if (idev->info->mem[vma->vm_pgoff].size == 0)
55036+ size = idev->info->mem[vma->vm_pgoff].size;
55037+ if (size == 0)
55038+ return -1;
55039+ if (vma->vm_end - vma->vm_start > size)
55040 return -1;
55041 return (int)vma->vm_pgoff;
55042 }
55043@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
55044 idev->owner = owner;
55045 idev->info = info;
55046 init_waitqueue_head(&idev->wait);
55047- atomic_set(&idev->event, 0);
55048+ atomic_set_unchecked(&idev->event, 0);
55049
55050 ret = uio_get_minor(idev);
55051 if (ret)
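Two distinct changes in the uio hunks above: the event counter becomes atomic_unchecked_t (grsecurity's type for counters whose wraparound is harmless, exempting them from REFCOUNT overflow detection), and uio_find_mem_index() gains a check that the requested mapping is no larger than the backing region. A small sketch of the added range check (names follow the hunk; illustrative only):

#include <stdio.h>

/* Validate a requested mapping of req_len bytes against a region of
 * region_len bytes, mirroring the patched uio_find_mem_index(). */
static int map_index_ok(unsigned long req_len, unsigned long region_len)
{
	if (region_len == 0)
		return -1;		/* unused slot */
	if (req_len > region_len)
		return -1;		/* the check the patch adds */
	return 0;
}

int main(void)
{
	printf("%d\n", map_index_ok(4096, 8192));	/*  0: fits      */
	printf("%d\n", map_index_ok(8192, 4096));	/* -1: oversized */
	printf("%d\n", map_index_ok(4096, 0));		/* -1: empty slot */
	return 0;
}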
55052diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55053index 813d4d3..a71934f 100644
55054--- a/drivers/usb/atm/cxacru.c
55055+++ b/drivers/usb/atm/cxacru.c
55056@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55057 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55058 if (ret < 2)
55059 return -EINVAL;
55060- if (index < 0 || index > 0x7f)
55061+ if (index > 0x7f)
55062 return -EINVAL;
55063 pos += tmp;
55064
55065diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55066index dada014..1d0d517 100644
55067--- a/drivers/usb/atm/usbatm.c
55068+++ b/drivers/usb/atm/usbatm.c
55069@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55070 if (printk_ratelimit())
55071 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55072 __func__, vpi, vci);
55073- atomic_inc(&vcc->stats->rx_err);
55074+ atomic_inc_unchecked(&vcc->stats->rx_err);
55075 return;
55076 }
55077
55078@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55079 if (length > ATM_MAX_AAL5_PDU) {
55080 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55081 __func__, length, vcc);
55082- atomic_inc(&vcc->stats->rx_err);
55083+ atomic_inc_unchecked(&vcc->stats->rx_err);
55084 goto out;
55085 }
55086
55087@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55088 if (sarb->len < pdu_length) {
55089 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55090 __func__, pdu_length, sarb->len, vcc);
55091- atomic_inc(&vcc->stats->rx_err);
55092+ atomic_inc_unchecked(&vcc->stats->rx_err);
55093 goto out;
55094 }
55095
55096 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55097 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55098 __func__, vcc);
55099- atomic_inc(&vcc->stats->rx_err);
55100+ atomic_inc_unchecked(&vcc->stats->rx_err);
55101 goto out;
55102 }
55103
55104@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55105 if (printk_ratelimit())
55106 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55107 __func__, length);
55108- atomic_inc(&vcc->stats->rx_drop);
55109+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55110 goto out;
55111 }
55112
55113@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55114
55115 vcc->push(vcc, skb);
55116
55117- atomic_inc(&vcc->stats->rx);
55118+ atomic_inc_unchecked(&vcc->stats->rx);
55119 out:
55120 skb_trim(sarb, 0);
55121 }
55122@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55123 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55124
55125 usbatm_pop(vcc, skb);
55126- atomic_inc(&vcc->stats->tx);
55127+ atomic_inc_unchecked(&vcc->stats->tx);
55128
55129 skb = skb_dequeue(&instance->sndqueue);
55130 }
55131@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55132 if (!left--)
55133 return sprintf(page,
55134 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55135- atomic_read(&atm_dev->stats.aal5.tx),
55136- atomic_read(&atm_dev->stats.aal5.tx_err),
55137- atomic_read(&atm_dev->stats.aal5.rx),
55138- atomic_read(&atm_dev->stats.aal5.rx_err),
55139- atomic_read(&atm_dev->stats.aal5.rx_drop));
55140+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55141+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55142+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55143+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55144+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55145
55146 if (!left--) {
55147 if (instance->disconnected)
55148diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55149index 2a3bbdf..91d72cf 100644
55150--- a/drivers/usb/core/devices.c
55151+++ b/drivers/usb/core/devices.c
55152@@ -126,7 +126,7 @@ static const char format_endpt[] =
55153 * time it gets called.
55154 */
55155 static struct device_connect_event {
55156- atomic_t count;
55157+ atomic_unchecked_t count;
55158 wait_queue_head_t wait;
55159 } device_event = {
55160 .count = ATOMIC_INIT(1),
55161@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55162
55163 void usbfs_conn_disc_event(void)
55164 {
55165- atomic_add(2, &device_event.count);
55166+ atomic_add_unchecked(2, &device_event.count);
55167 wake_up(&device_event.wait);
55168 }
55169
55170@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55171
55172 poll_wait(file, &device_event.wait, wait);
55173
55174- event_count = atomic_read(&device_event.count);
55175+ event_count = atomic_read_unchecked(&device_event.count);
55176 if (file->f_version != event_count) {
55177 file->f_version = event_count;
55178 return POLLIN | POLLRDNORM;
55179diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55180index 257876e..4304364 100644
55181--- a/drivers/usb/core/devio.c
55182+++ b/drivers/usb/core/devio.c
55183@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55184 struct usb_dev_state *ps = file->private_data;
55185 struct usb_device *dev = ps->dev;
55186 ssize_t ret = 0;
55187- unsigned len;
55188+ size_t len;
55189 loff_t pos;
55190 int i;
55191
55192@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55193 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55194 struct usb_config_descriptor *config =
55195 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55196- unsigned int length = le16_to_cpu(config->wTotalLength);
55197+ size_t length = le16_to_cpu(config->wTotalLength);
55198
55199 if (*ppos < pos + length) {
55200
55201 /* The descriptor may claim to be longer than it
55202 * really is. Here is the actual allocated length. */
55203- unsigned alloclen =
55204+ size_t alloclen =
55205 le16_to_cpu(dev->config[i].desc.wTotalLength);
55206
55207- len = length - (*ppos - pos);
55208+ len = length + pos - *ppos;
55209 if (len > nbytes)
55210 len = nbytes;
55211
55212 /* Simply don't write (skip over) unallocated parts */
55213 if (alloclen > (*ppos - pos)) {
55214- alloclen -= (*ppos - pos);
55215+ alloclen = alloclen + pos - *ppos;
55216 if (copy_to_user(buf,
55217 dev->rawdescriptors[i] + (*ppos - pos),
55218 min(len, alloclen))) {
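The devio.c hunk above widens the length variables from unsigned int to size_t and rewrites length - (*ppos - pos) as length + pos - *ppos. Within the existing *ppos < pos + length guard the old arithmetic gave the same result, so this is hardening: the wider type and reordered expression remove the 32-bit wrap that would occur if the guard's assumption were ever violated. A compact demonstration of that wrap (a sketch, not the driver's exact types):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	long long pos = 0, ppos = 8;	/* reader is 8 bytes in        */
	unsigned int length = 4;	/* descriptor is only 4 bytes  */

	/* Narrow form: wraps to ~4 billion when ppos - pos > length. */
	unsigned int bad = length - (unsigned int)(ppos - pos);

	/* Patched form: guard first, then 64-bit arithmetic. */
	size_t good = 0;
	if (ppos < pos + length)
		good = (size_t)length + pos - ppos;

	printf("bad=%u good=%zu\n", bad, good);	/* bad=4294967292 good=0 */
	return 0;
}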
55219diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55220index bec31e2..b8091cd 100644
55221--- a/drivers/usb/core/hcd.c
55222+++ b/drivers/usb/core/hcd.c
55223@@ -1554,7 +1554,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55224 */
55225 usb_get_urb(urb);
55226 atomic_inc(&urb->use_count);
55227- atomic_inc(&urb->dev->urbnum);
55228+ atomic_inc_unchecked(&urb->dev->urbnum);
55229 usbmon_urb_submit(&hcd->self, urb);
55230
55231 /* NOTE requirements on root-hub callers (usbfs and the hub
55232@@ -1581,7 +1581,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55233 urb->hcpriv = NULL;
55234 INIT_LIST_HEAD(&urb->urb_list);
55235 atomic_dec(&urb->use_count);
55236- atomic_dec(&urb->dev->urbnum);
55237+ atomic_dec_unchecked(&urb->dev->urbnum);
55238 if (atomic_read(&urb->reject))
55239 wake_up(&usb_kill_urb_queue);
55240 usb_put_urb(urb);
55241diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55242index 0e950ad..a2be5b0 100644
55243--- a/drivers/usb/core/hub.c
55244+++ b/drivers/usb/core/hub.c
55245@@ -27,6 +27,7 @@
55246 #include <linux/freezer.h>
55247 #include <linux/random.h>
55248 #include <linux/pm_qos.h>
55249+#include <linux/grsecurity.h>
55250
55251 #include <asm/uaccess.h>
55252 #include <asm/byteorder.h>
55253@@ -4594,6 +4595,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55254 goto done;
55255 return;
55256 }
55257+
55258+ if (gr_handle_new_usb())
55259+ goto done;
55260+
55261 if (hub_is_superspeed(hub->hdev))
55262 unit_load = 150;
55263 else
55264diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55265index 0c8a7fc..c45b40a 100644
55266--- a/drivers/usb/core/message.c
55267+++ b/drivers/usb/core/message.c
55268@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55269 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55270 * error number.
55271 */
55272-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55273+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55274 __u8 requesttype, __u16 value, __u16 index, void *data,
55275 __u16 size, int timeout)
55276 {
55277@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55278 * If successful, 0. Otherwise a negative error number. The number of actual
55279 * bytes transferred will be stored in the @actual_length parameter.
55280 */
55281-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55282+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55283 void *data, int len, int *actual_length, int timeout)
55284 {
55285 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55286@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55287 * bytes transferred will be stored in the @actual_length parameter.
55288 *
55289 */
55290-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55291+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55292 void *data, int len, int *actual_length, int timeout)
55293 {
55294 struct urb *urb;
55295diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55296index 1236c60..d47a51c 100644
55297--- a/drivers/usb/core/sysfs.c
55298+++ b/drivers/usb/core/sysfs.c
55299@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55300 struct usb_device *udev;
55301
55302 udev = to_usb_device(dev);
55303- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55304+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55305 }
55306 static DEVICE_ATTR_RO(urbnum);
55307
55308diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55309index 4d11449..f4ccabf 100644
55310--- a/drivers/usb/core/usb.c
55311+++ b/drivers/usb/core/usb.c
55312@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55313 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55314 dev->state = USB_STATE_ATTACHED;
55315 dev->lpm_disable_count = 1;
55316- atomic_set(&dev->urbnum, 0);
55317+ atomic_set_unchecked(&dev->urbnum, 0);
55318
55319 INIT_LIST_HEAD(&dev->ep0.urb_list);
55320 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55321diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
55322index dab7927..6f53afc 100644
55323--- a/drivers/usb/dwc3/gadget.c
55324+++ b/drivers/usb/dwc3/gadget.c
55325@@ -615,8 +615,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
55326 if (!usb_endpoint_xfer_isoc(desc))
55327 return 0;
55328
55329- memset(&trb_link, 0, sizeof(trb_link));
55330-
55331 /* Link TRB for ISOC. The HWO bit is never reset */
55332 trb_st_hw = &dep->trb_pool[0];
55333
55334diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55335index 8cfc319..4868255 100644
55336--- a/drivers/usb/early/ehci-dbgp.c
55337+++ b/drivers/usb/early/ehci-dbgp.c
55338@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55339
55340 #ifdef CONFIG_KGDB
55341 static struct kgdb_io kgdbdbgp_io_ops;
55342-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55343+static struct kgdb_io kgdbdbgp_io_ops_console;
55344+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55345 #else
55346 #define dbgp_kgdb_mode (0)
55347 #endif
55348@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55349 .write_char = kgdbdbgp_write_char,
55350 };
55351
55352+static struct kgdb_io kgdbdbgp_io_ops_console = {
55353+ .name = "kgdbdbgp",
55354+ .read_char = kgdbdbgp_read_char,
55355+ .write_char = kgdbdbgp_write_char,
55356+ .is_console = 1
55357+};
55358+
55359 static int kgdbdbgp_wait_time;
55360
55361 static int __init kgdbdbgp_parse_config(char *str)
55362@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55363 ptr++;
55364 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55365 }
55366- kgdb_register_io_module(&kgdbdbgp_io_ops);
55367- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55368+ if (early_dbgp_console.index != -1)
55369+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55370+ else
55371+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55372
55373 return 0;
55374 }
55375diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c
55376index 2b4c82d..06a8ee6 100644
55377--- a/drivers/usb/gadget/f_uac1.c
55378+++ b/drivers/usb/gadget/f_uac1.c
55379@@ -13,6 +13,7 @@
55380 #include <linux/kernel.h>
55381 #include <linux/device.h>
55382 #include <linux/atomic.h>
55383+#include <linux/module.h>
55384
55385 #include "u_uac1.h"
55386
55387diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
55388index ad0aca8..8ff84865 100644
55389--- a/drivers/usb/gadget/u_serial.c
55390+++ b/drivers/usb/gadget/u_serial.c
55391@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55392 spin_lock_irq(&port->port_lock);
55393
55394 /* already open? Great. */
55395- if (port->port.count) {
55396+ if (atomic_read(&port->port.count)) {
55397 status = 0;
55398- port->port.count++;
55399+ atomic_inc(&port->port.count);
55400
55401 /* currently opening/closing? wait ... */
55402 } else if (port->openclose) {
55403@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55404 tty->driver_data = port;
55405 port->port.tty = tty;
55406
55407- port->port.count = 1;
55408+ atomic_set(&port->port.count, 1);
55409 port->openclose = false;
55410
55411 /* if connected, start the I/O stream */
55412@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55413
55414 spin_lock_irq(&port->port_lock);
55415
55416- if (port->port.count != 1) {
55417- if (port->port.count == 0)
55418+ if (atomic_read(&port->port.count) != 1) {
55419+ if (atomic_read(&port->port.count) == 0)
55420 WARN_ON(1);
55421 else
55422- --port->port.count;
55423+ atomic_dec(&port->port.count);
55424 goto exit;
55425 }
55426
55427@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55428 * and sleep if necessary
55429 */
55430 port->openclose = true;
55431- port->port.count = 0;
55432+ atomic_set(&port->port.count, 0);
55433
55434 gser = port->port_usb;
55435 if (gser && gser->disconnect)
55436@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
55437 int cond;
55438
55439 spin_lock_irq(&port->port_lock);
55440- cond = (port->port.count == 0) && !port->openclose;
55441+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55442 spin_unlock_irq(&port->port_lock);
55443 return cond;
55444 }
55445@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55446 /* if it's already open, start I/O ... and notify the serial
55447 * protocol about open/close status (connect/disconnect).
55448 */
55449- if (port->port.count) {
55450+ if (atomic_read(&port->port.count)) {
55451 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55452 gs_start_io(port);
55453 if (gser->connect)
55454@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
55455
55456 port->port_usb = NULL;
55457 gser->ioport = NULL;
55458- if (port->port.count > 0 || port->openclose) {
55459+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55460 wake_up_interruptible(&port->drain_wait);
55461 if (port->port.tty)
55462 tty_hangup(port->port.tty);
55463@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
55464
55465 /* finally, free any unused/unusable I/O buffers */
55466 spin_lock_irqsave(&port->port_lock, flags);
55467- if (port->port.count == 0 && !port->openclose)
55468+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55469 gs_buf_free(&port->port_write_buf);
55470 gs_free_requests(gser->out, &port->read_pool, NULL);
55471 gs_free_requests(gser->out, &port->read_queue, NULL);
55472diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c
55473index 7a55fea..cc0ed4f 100644
55474--- a/drivers/usb/gadget/u_uac1.c
55475+++ b/drivers/usb/gadget/u_uac1.c
55476@@ -16,6 +16,7 @@
55477 #include <linux/ctype.h>
55478 #include <linux/random.h>
55479 #include <linux/syscalls.h>
55480+#include <linux/module.h>
55481
55482 #include "u_uac1.h"
55483
55484diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55485index cc305c7..cf6da4a 100644
55486--- a/drivers/usb/host/ehci-hub.c
55487+++ b/drivers/usb/host/ehci-hub.c
55488@@ -771,7 +771,7 @@ static struct urb *request_single_step_set_feature_urb(
55489 urb->transfer_flags = URB_DIR_IN;
55490 usb_get_urb(urb);
55491 atomic_inc(&urb->use_count);
55492- atomic_inc(&urb->dev->urbnum);
55493+ atomic_inc_unchecked(&urb->dev->urbnum);
55494 urb->setup_dma = dma_map_single(
55495 hcd->self.controller,
55496 urb->setup_packet,
55497@@ -838,7 +838,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55498 urb->status = -EINPROGRESS;
55499 usb_get_urb(urb);
55500 atomic_inc(&urb->use_count);
55501- atomic_inc(&urb->dev->urbnum);
55502+ atomic_inc_unchecked(&urb->dev->urbnum);
55503 retval = submit_single_step_set_feature(hcd, urb, 0);
55504 if (!retval && !wait_for_completion_timeout(&done,
55505 msecs_to_jiffies(2000))) {
55506diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55507index d0d8fad..668ef7b 100644
55508--- a/drivers/usb/host/hwa-hc.c
55509+++ b/drivers/usb/host/hwa-hc.c
55510@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55511 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55512 struct wahc *wa = &hwahc->wa;
55513 struct device *dev = &wa->usb_iface->dev;
55514- u8 mas_le[UWB_NUM_MAS/8];
55515+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55516+
55517+ if (mas_le == NULL)
55518+ return -ENOMEM;
55519
55520 /* Set the stream index */
55521 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55522@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55523 WUSB_REQ_SET_WUSB_MAS,
55524 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55525 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55526- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55527+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55528 if (result < 0)
55529 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55530 out:
55531+ kfree(mas_le);
55532+
55533 return result;
55534 }
55535
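The hwa-hc.c hunk above replaces the on-stack u8 mas_le[UWB_NUM_MAS/8] with a kmalloc'd buffer (kernel stacks are small, and buffers handed to USB transfers should not live on the stack) and passes UWB_NUM_MAS/8 in place of the hard-coded 32. A userspace-flavoured sketch of the refactor (NUM_MAS and set_mas are hypothetical stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_MAS 256	/* assumption: mirrors UWB_NUM_MAS */

static int set_mas(const unsigned char *src)
{
	size_t len = NUM_MAS / 8;	/* derived once, no magic "32" */
	unsigned char *mas_le = malloc(len);	/* heap, not stack */
	int result;

	if (!mas_le)
		return -1;	/* -ENOMEM in the kernel */

	memcpy(mas_le, src, len);
	/* ... hand (mas_le, len) to the transfer function here ... */
	result = 0;

	free(mas_le);	/* freed on every exit path */
	return result;
}

int main(void)
{
	unsigned char mas[NUM_MAS / 8] = { 0 };

	printf("set_mas: %d\n", set_mas(mas));
	return 0;
}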
55536diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55537index b3d245e..99549ed 100644
55538--- a/drivers/usb/misc/appledisplay.c
55539+++ b/drivers/usb/misc/appledisplay.c
55540@@ -84,7 +84,7 @@ struct appledisplay {
55541 struct mutex sysfslock; /* concurrent read and write */
55542 };
55543
55544-static atomic_t count_displays = ATOMIC_INIT(0);
55545+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55546 static struct workqueue_struct *wq;
55547
55548 static void appledisplay_complete(struct urb *urb)
55549@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55550
55551 /* Register backlight device */
55552 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55553- atomic_inc_return(&count_displays) - 1);
55554+ atomic_inc_return_unchecked(&count_displays) - 1);
55555 memset(&props, 0, sizeof(struct backlight_properties));
55556 props.type = BACKLIGHT_RAW;
55557 props.max_brightness = 0xff;
55558diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55559index 8d7fc48..01c4986 100644
55560--- a/drivers/usb/serial/console.c
55561+++ b/drivers/usb/serial/console.c
55562@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
55563
55564 info->port = port;
55565
55566- ++port->port.count;
55567+ atomic_inc(&port->port.count);
55568 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55569 if (serial->type->set_termios) {
55570 /*
55571@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
55572 }
55573 /* Now that any required fake tty operations are completed restore
55574 * the tty port count */
55575- --port->port.count;
55576+ atomic_dec(&port->port.count);
55577 /* The console is special in terms of closing the device so
55578 * indicate this port is now acting as a system console. */
55579 port->port.console = 1;
55580@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
55581 free_tty:
55582 kfree(tty);
55583 reset_open_count:
55584- port->port.count = 0;
55585+ atomic_set(&port->port.count, 0);
55586 usb_autopm_put_interface(serial->interface);
55587 error_get_interface:
55588 usb_serial_put(serial);
55589@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
55590 static void usb_console_write(struct console *co,
55591 const char *buf, unsigned count)
55592 {
55593- static struct usbcons_info *info = &usbcons_info;
55594+ struct usbcons_info *info = &usbcons_info;
55595 struct usb_serial_port *port = info->port;
55596 struct usb_serial *serial;
55597 int retval = -ENODEV;
55598diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55599index 307e339..6aa97cb 100644
55600--- a/drivers/usb/storage/usb.h
55601+++ b/drivers/usb/storage/usb.h
55602@@ -63,7 +63,7 @@ struct us_unusual_dev {
55603 __u8 useProtocol;
55604 __u8 useTransport;
55605 int (*initFunction)(struct us_data *);
55606-};
55607+} __do_const;
55608
55609
55610 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55611diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55612index f2a8d29..7bc3fe7 100644
55613--- a/drivers/usb/wusbcore/wa-hc.h
55614+++ b/drivers/usb/wusbcore/wa-hc.h
55615@@ -240,7 +240,7 @@ struct wahc {
55616 spinlock_t xfer_list_lock;
55617 struct work_struct xfer_enqueue_work;
55618 struct work_struct xfer_error_work;
55619- atomic_t xfer_id_count;
55620+ atomic_unchecked_t xfer_id_count;
55621
55622 kernel_ulong_t quirks;
55623 };
55624@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
55625 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
55626 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
55627 wa->dto_in_use = 0;
55628- atomic_set(&wa->xfer_id_count, 1);
55629+ atomic_set_unchecked(&wa->xfer_id_count, 1);
55630 /* init the buf in URBs */
55631 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
55632 usb_init_urb(&(wa->buf_in_urbs[index]));
55633diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
55634index 3e2e4ed..060c9b8 100644
55635--- a/drivers/usb/wusbcore/wa-xfer.c
55636+++ b/drivers/usb/wusbcore/wa-xfer.c
55637@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
55638 */
55639 static void wa_xfer_id_init(struct wa_xfer *xfer)
55640 {
55641- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
55642+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
55643 }
55644
55645 /* Return the xfer's ID. */
55646diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
55647index f018d8d..ccab63f 100644
55648--- a/drivers/vfio/vfio.c
55649+++ b/drivers/vfio/vfio.c
55650@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
55651 return 0;
55652
55653 /* TODO Prevent device auto probing */
55654- WARN("Device %s added to live group %d!\n", dev_name(dev),
55655+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
55656 iommu_group_id(group->iommu_group));
55657
55658 return 0;
55659diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
55660index 5174eba..451e6bc 100644
55661--- a/drivers/vhost/vringh.c
55662+++ b/drivers/vhost/vringh.c
55663@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
55664 /* Userspace access helpers: in this case, addresses are really userspace. */
55665 static inline int getu16_user(u16 *val, const u16 *p)
55666 {
55667- return get_user(*val, (__force u16 __user *)p);
55668+ return get_user(*val, (u16 __force_user *)p);
55669 }
55670
55671 static inline int putu16_user(u16 *p, u16 val)
55672 {
55673- return put_user(val, (__force u16 __user *)p);
55674+ return put_user(val, (u16 __force_user *)p);
55675 }
55676
55677 static inline int copydesc_user(void *dst, const void *src, size_t len)
55678 {
55679- return copy_from_user(dst, (__force void __user *)src, len) ?
55680+ return copy_from_user(dst, (void __force_user *)src, len) ?
55681 -EFAULT : 0;
55682 }
55683
55684@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
55685 const struct vring_used_elem *src,
55686 unsigned int num)
55687 {
55688- return copy_to_user((__force void __user *)dst, src,
55689+ return copy_to_user((void __force_user *)dst, src,
55690 sizeof(*dst) * num) ? -EFAULT : 0;
55691 }
55692
55693 static inline int xfer_from_user(void *src, void *dst, size_t len)
55694 {
55695- return copy_from_user(dst, (__force void __user *)src, len) ?
55696+ return copy_from_user(dst, (void __force_user *)src, len) ?
55697 -EFAULT : 0;
55698 }
55699
55700 static inline int xfer_to_user(void *dst, void *src, size_t len)
55701 {
55702- return copy_to_user((__force void __user *)dst, src, len) ?
55703+ return copy_to_user((void __force_user *)dst, src, len) ?
55704 -EFAULT : 0;
55705 }
55706
55707@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
55708 vrh->last_used_idx = 0;
55709 vrh->vring.num = num;
55710 /* vring expects kernel addresses, but only used via accessors. */
55711- vrh->vring.desc = (__force struct vring_desc *)desc;
55712- vrh->vring.avail = (__force struct vring_avail *)avail;
55713- vrh->vring.used = (__force struct vring_used *)used;
55714+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
55715+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
55716+ vrh->vring.used = (__force_kernel struct vring_used *)used;
55717 return 0;
55718 }
55719 EXPORT_SYMBOL(vringh_init_user);
55720@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
55721
55722 static inline int putu16_kern(u16 *p, u16 val)
55723 {
55724- ACCESS_ONCE(*p) = val;
55725+ ACCESS_ONCE_RW(*p) = val;
55726 return 0;
55727 }
55728
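The vringh.c hunk above swaps bare (__force ... __user *) casts for grsecurity's __force_user/__force_kernel markers, so sparse keeps tracking which address space a pointer belongs to even across deliberate conversions. The underlying mechanism can be sketched with sparse's own annotations; a plain compiler expands the macros to nothing, and the diagnostics only appear under `sparse file.c`:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

/* Deliberate user->kernel conversion: without __force, sparse warns
 * about the changed address space; with it, the cast is documented. */
static int get_val(unsigned short *dst, const unsigned short __user *src)
{
	const unsigned short *k = (__force const unsigned short *)src;

	*dst = *k;	/* stand-in for a checked get_user() copy */
	return 0;
}

int main(void)
{
	unsigned short v = 0, src = 42;

	get_val(&v, (__force unsigned short __user *)&src);
	return v == 42 ? 0 : 1;
}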
55729diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
55730index 84a110a..96312c3 100644
55731--- a/drivers/video/backlight/kb3886_bl.c
55732+++ b/drivers/video/backlight/kb3886_bl.c
55733@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
55734 static unsigned long kb3886bl_flags;
55735 #define KB3886BL_SUSPENDED 0x01
55736
55737-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
55738+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
55739 {
55740 .ident = "Sahara Touch-iT",
55741 .matches = {
55742diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
55743index 1b0b233..6f34c2c 100644
55744--- a/drivers/video/fbdev/arcfb.c
55745+++ b/drivers/video/fbdev/arcfb.c
55746@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
55747 return -ENOSPC;
55748
55749 err = 0;
55750- if ((count + p) > fbmemlength) {
55751+ if (count > (fbmemlength - p)) {
55752 count = fbmemlength - p;
55753 err = -ENOSPC;
55754 }
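The arcfb hunk above turns (count + p) > fbmemlength into count > (fbmemlength - p). When count is attacker-influenced, the addition can wrap past zero and slip under the limit, while the subtraction form cannot wrap once p <= fbmemlength is already established (the earlier -ENOSPC return in the context above). A compact demonstration:

#include <stdio.h>

#define FBMEM 65536UL	/* stand-in for fbmemlength */

int main(void)
{
	unsigned long p = 4096;				/* file position  */
	unsigned long count = (unsigned long)-1 - 100;	/* hostile length */

	if (!((count + p) > FBMEM))		/* sum wraps to 3995 ...   */
		puts("old check: bogus write accepted");

	if (count > (FBMEM - p))		/* ... new form cannot wrap */
		puts("new check: write rejected");

	return 0;
}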
55755diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
55756index 52108be..c7c110d 100644
55757--- a/drivers/video/fbdev/aty/aty128fb.c
55758+++ b/drivers/video/fbdev/aty/aty128fb.c
55759@@ -149,7 +149,7 @@ enum {
55760 };
55761
55762 /* Must match above enum */
55763-static char * const r128_family[] = {
55764+static const char * const r128_family[] = {
55765 "AGP",
55766 "PCI",
55767 "PRO AGP",
55768diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
55769index c3d0074..0b9077e 100644
55770--- a/drivers/video/fbdev/aty/atyfb_base.c
55771+++ b/drivers/video/fbdev/aty/atyfb_base.c
55772@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
55773 par->accel_flags = var->accel_flags; /* hack */
55774
55775 if (var->accel_flags) {
55776- info->fbops->fb_sync = atyfb_sync;
55777+ pax_open_kernel();
55778+ *(void **)&info->fbops->fb_sync = atyfb_sync;
55779+ pax_close_kernel();
55780 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55781 } else {
55782- info->fbops->fb_sync = NULL;
55783+ pax_open_kernel();
55784+ *(void **)&info->fbops->fb_sync = NULL;
55785+ pax_close_kernel();
55786 info->flags |= FBINFO_HWACCEL_DISABLED;
55787 }
55788
55789diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
55790index 2fa0317..4983f2a 100644
55791--- a/drivers/video/fbdev/aty/mach64_cursor.c
55792+++ b/drivers/video/fbdev/aty/mach64_cursor.c
55793@@ -8,6 +8,7 @@
55794 #include "../core/fb_draw.h"
55795
55796 #include <asm/io.h>
55797+#include <asm/pgtable.h>
55798
55799 #ifdef __sparc__
55800 #include <asm/fbio.h>
55801@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
55802 info->sprite.buf_align = 16; /* and 64 lines tall. */
55803 info->sprite.flags = FB_PIXMAP_IO;
55804
55805- info->fbops->fb_cursor = atyfb_cursor;
55806+ pax_open_kernel();
55807+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
55808+ pax_close_kernel();
55809
55810 return 0;
55811 }
55812diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
55813index 900aa4e..6d49418 100644
55814--- a/drivers/video/fbdev/core/fb_defio.c
55815+++ b/drivers/video/fbdev/core/fb_defio.c
55816@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
55817
55818 BUG_ON(!fbdefio);
55819 mutex_init(&fbdefio->lock);
55820- info->fbops->fb_mmap = fb_deferred_io_mmap;
55821+ pax_open_kernel();
55822+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55823+ pax_close_kernel();
55824 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55825 INIT_LIST_HEAD(&fbdefio->pagelist);
55826 if (fbdefio->delay == 0) /* set a default of 1 s */
55827@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55828 page->mapping = NULL;
55829 }
55830
55831- info->fbops->fb_mmap = NULL;
55832+ *(void **)&info->fbops->fb_mmap = NULL;
55833 mutex_destroy(&fbdefio->lock);
55834 }
55835 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55836diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55837index b5e85f6..290f8c7 100644
55838--- a/drivers/video/fbdev/core/fbmem.c
55839+++ b/drivers/video/fbdev/core/fbmem.c
55840@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55841 __u32 data;
55842 int err;
55843
55844- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55845+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55846
55847 data = (__u32) (unsigned long) fix->smem_start;
55848 err |= put_user(data, &fix32->smem_start);
55849diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55850index e23392e..8a77540 100644
55851--- a/drivers/video/fbdev/hyperv_fb.c
55852+++ b/drivers/video/fbdev/hyperv_fb.c
55853@@ -235,7 +235,7 @@ static uint screen_fb_size;
55854 static inline int synthvid_send(struct hv_device *hdev,
55855 struct synthvid_msg *msg)
55856 {
55857- static atomic64_t request_id = ATOMIC64_INIT(0);
55858+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55859 int ret;
55860
55861 msg->pipe_hdr.type = PIPE_MSG_DATA;
55862@@ -243,7 +243,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55863
55864 ret = vmbus_sendpacket(hdev->channel, msg,
55865 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55866- atomic64_inc_return(&request_id),
55867+ atomic64_inc_return_unchecked(&request_id),
55868 VM_PKT_DATA_INBAND, 0);
55869
55870 if (ret)
55871diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55872index 7672d2e..b56437f 100644
55873--- a/drivers/video/fbdev/i810/i810_accel.c
55874+++ b/drivers/video/fbdev/i810/i810_accel.c
55875@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55876 }
55877 }
55878 printk("ringbuffer lockup!!!\n");
55879+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55880 i810_report_error(mmio);
55881 par->dev_flags |= LOCKUP;
55882 info->pixmap.scan_align = 1;
55883diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55884index a01147f..5d896f8 100644
55885--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55886+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55887@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55888
55889 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55890 struct matrox_switch matrox_mystique = {
55891- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55892+ .preinit = MGA1064_preinit,
55893+ .reset = MGA1064_reset,
55894+ .init = MGA1064_init,
55895+ .restore = MGA1064_restore,
55896 };
55897 EXPORT_SYMBOL(matrox_mystique);
55898 #endif
55899
55900 #ifdef CONFIG_FB_MATROX_G
55901 struct matrox_switch matrox_G100 = {
55902- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55903+ .preinit = MGAG100_preinit,
55904+ .reset = MGAG100_reset,
55905+ .init = MGAG100_init,
55906+ .restore = MGAG100_restore,
55907 };
55908 EXPORT_SYMBOL(matrox_G100);
55909 #endif
55910diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55911index 195ad7c..09743fc 100644
55912--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55913+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55914@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55915 }
55916
55917 struct matrox_switch matrox_millennium = {
55918- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55919+ .preinit = Ti3026_preinit,
55920+ .reset = Ti3026_reset,
55921+ .init = Ti3026_init,
55922+ .restore = Ti3026_restore
55923 };
55924 EXPORT_SYMBOL(matrox_millennium);
55925 #endif
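The two matroxfb hunks above (and the sh_mobile_lcdc one below) convert positional struct initializers to designated ones. Positional initializers silently misassign fields if the struct is ever reordered, which it will be under grsecurity's structure-layout randomization; designated initializers bind each value to a named member and keep working. A small before/after sketch:

#include <stdio.h>

struct ops {
	int (*preinit)(void);
	int (*reset)(void);
	int (*init)(void);
};

static int do_preinit(void) { return 0; }
static int do_reset(void)   { return 1; }
static int do_init(void)    { return 2; }

/* Positional: breaks silently if the fields are reordered. */
static struct ops positional = { do_preinit, do_reset, do_init };

/* Designated: order-independent, randomization-safe. */
static struct ops designated = {
	.preinit = do_preinit,
	.reset   = do_reset,
	.init    = do_init,
};

int main(void)
{
	printf("%d %d\n", positional.init(), designated.init()); /* 2 2 */
	return 0;
}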
55926diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55927index fe92eed..106e085 100644
55928--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55929+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55930@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55931 struct mb862xxfb_par *par = info->par;
55932
55933 if (info->var.bits_per_pixel == 32) {
55934- info->fbops->fb_fillrect = cfb_fillrect;
55935- info->fbops->fb_copyarea = cfb_copyarea;
55936- info->fbops->fb_imageblit = cfb_imageblit;
55937+ pax_open_kernel();
55938+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55939+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55940+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55941+ pax_close_kernel();
55942 } else {
55943 outreg(disp, GC_L0EM, 3);
55944- info->fbops->fb_fillrect = mb86290fb_fillrect;
55945- info->fbops->fb_copyarea = mb86290fb_copyarea;
55946- info->fbops->fb_imageblit = mb86290fb_imageblit;
55947+ pax_open_kernel();
55948+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55949+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55950+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55951+ pax_close_kernel();
55952 }
55953 outreg(draw, GDC_REG_DRAW_BASE, 0);
55954 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
55955diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
55956index def0412..fed6529 100644
55957--- a/drivers/video/fbdev/nvidia/nvidia.c
55958+++ b/drivers/video/fbdev/nvidia/nvidia.c
55959@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55960 info->fix.line_length = (info->var.xres_virtual *
55961 info->var.bits_per_pixel) >> 3;
55962 if (info->var.accel_flags) {
55963- info->fbops->fb_imageblit = nvidiafb_imageblit;
55964- info->fbops->fb_fillrect = nvidiafb_fillrect;
55965- info->fbops->fb_copyarea = nvidiafb_copyarea;
55966- info->fbops->fb_sync = nvidiafb_sync;
55967+ pax_open_kernel();
55968+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55969+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55970+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55971+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55972+ pax_close_kernel();
55973 info->pixmap.scan_align = 4;
55974 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55975 info->flags |= FBINFO_READS_FAST;
55976 NVResetGraphics(info);
55977 } else {
55978- info->fbops->fb_imageblit = cfb_imageblit;
55979- info->fbops->fb_fillrect = cfb_fillrect;
55980- info->fbops->fb_copyarea = cfb_copyarea;
55981- info->fbops->fb_sync = NULL;
55982+ pax_open_kernel();
55983+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55984+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55985+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55986+ *(void **)&info->fbops->fb_sync = NULL;
55987+ pax_close_kernel();
55988 info->pixmap.scan_align = 1;
55989 info->flags |= FBINFO_HWACCEL_DISABLED;
55990 info->flags &= ~FBINFO_READS_FAST;
55991@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55992 info->pixmap.size = 8 * 1024;
55993 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55994
55995- if (!hwcur)
55996- info->fbops->fb_cursor = NULL;
55997+ if (!hwcur) {
55998+ pax_open_kernel();
55999+ *(void **)&info->fbops->fb_cursor = NULL;
56000+ pax_close_kernel();
56001+ }
56002
56003 info->var.accel_flags = (!noaccel);
56004
56005diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56006index 2412a0d..294215b 100644
56007--- a/drivers/video/fbdev/omap2/dss/display.c
56008+++ b/drivers/video/fbdev/omap2/dss/display.c
56009@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56010 if (dssdev->name == NULL)
56011 dssdev->name = dssdev->alias;
56012
56013+ pax_open_kernel();
56014 if (drv && drv->get_resolution == NULL)
56015- drv->get_resolution = omapdss_default_get_resolution;
56016+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56017 if (drv && drv->get_recommended_bpp == NULL)
56018- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56019+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56020 if (drv && drv->get_timings == NULL)
56021- drv->get_timings = omapdss_default_get_timings;
56022+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56023+ pax_close_kernel();
56024
56025 mutex_lock(&panel_list_mutex);
56026 list_add_tail(&dssdev->panel_list, &panel_list);
56027diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56028index 83433cb..71e9b98 100644
56029--- a/drivers/video/fbdev/s1d13xxxfb.c
56030+++ b/drivers/video/fbdev/s1d13xxxfb.c
56031@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56032
56033 switch(prod_id) {
56034 case S1D13506_PROD_ID: /* activate acceleration */
56035- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56036- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56037+ pax_open_kernel();
56038+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56039+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56040+ pax_close_kernel();
56041 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56042 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56043 break;
56044diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56045index 2bcc84a..29dd1ea 100644
56046--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56047+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56048@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56049 }
56050
56051 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56052- lcdc_sys_write_index,
56053- lcdc_sys_write_data,
56054- lcdc_sys_read_data,
56055+ .write_index = lcdc_sys_write_index,
56056+ .write_data = lcdc_sys_write_data,
56057+ .read_data = lcdc_sys_read_data,
56058 };
56059
56060 static int sh_mobile_lcdc_sginit(struct fb_info *info,
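
[editor's note] The sh_mobile_lcdcfb hunk above is a different kind of change: the positional initializer for sh_mobile_lcdc_sys_bus_ops is converted to C99 designated initializers. Positional initialization binds by field order and breaks silently if the structure layout ever changes (for instance under grsecurity's structure-layout randomization); binding by name is robust either way. A standalone illustration; the ops_example type and callbacks are hypothetical:

    /* Hypothetical example: designated initializers bind fields by
     * name, so reordering or randomizing the struct layout cannot
     * silently mispair callbacks. */
    struct ops_example {
    	void (*write_index)(void *handle, unsigned long index);
    	unsigned long (*read_data)(void *handle);
    };

    static void my_write_index(void *handle, unsigned long index) { }
    static unsigned long my_read_data(void *handle) { return 0; }

    static struct ops_example ex = {
    	.write_index = my_write_index,
    	.read_data   = my_read_data,
    };
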
56061diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56062index d513ed6..90b0de9 100644
56063--- a/drivers/video/fbdev/smscufx.c
56064+++ b/drivers/video/fbdev/smscufx.c
56065@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56066 fb_deferred_io_cleanup(info);
56067 kfree(info->fbdefio);
56068 info->fbdefio = NULL;
56069- info->fbops->fb_mmap = ufx_ops_mmap;
56070+ pax_open_kernel();
56071+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56072+ pax_close_kernel();
56073 }
56074
56075 pr_debug("released /dev/fb%d user=%d count=%d",
56076diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56077index 77b890e..458e666 100644
56078--- a/drivers/video/fbdev/udlfb.c
56079+++ b/drivers/video/fbdev/udlfb.c
56080@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56081 dlfb_urb_completion(urb);
56082
56083 error:
56084- atomic_add(bytes_sent, &dev->bytes_sent);
56085- atomic_add(bytes_identical, &dev->bytes_identical);
56086- atomic_add(width*height*2, &dev->bytes_rendered);
56087+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56088+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56089+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56090 end_cycles = get_cycles();
56091- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56092+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56093 >> 10)), /* Kcycles */
56094 &dev->cpu_kcycles_used);
56095
56096@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56097 dlfb_urb_completion(urb);
56098
56099 error:
56100- atomic_add(bytes_sent, &dev->bytes_sent);
56101- atomic_add(bytes_identical, &dev->bytes_identical);
56102- atomic_add(bytes_rendered, &dev->bytes_rendered);
56103+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56104+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56105+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56106 end_cycles = get_cycles();
56107- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56108+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56109 >> 10)), /* Kcycles */
56110 &dev->cpu_kcycles_used);
56111 }
56112@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56113 fb_deferred_io_cleanup(info);
56114 kfree(info->fbdefio);
56115 info->fbdefio = NULL;
56116- info->fbops->fb_mmap = dlfb_ops_mmap;
56117+ pax_open_kernel();
56118+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56119+ pax_close_kernel();
56120 }
56121
56122 pr_warn("released /dev/fb%d user=%d count=%d\n",
56123@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56124 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56125 struct dlfb_data *dev = fb_info->par;
56126 return snprintf(buf, PAGE_SIZE, "%u\n",
56127- atomic_read(&dev->bytes_rendered));
56128+ atomic_read_unchecked(&dev->bytes_rendered));
56129 }
56130
56131 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56132@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56133 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56134 struct dlfb_data *dev = fb_info->par;
56135 return snprintf(buf, PAGE_SIZE, "%u\n",
56136- atomic_read(&dev->bytes_identical));
56137+ atomic_read_unchecked(&dev->bytes_identical));
56138 }
56139
56140 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56141@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56142 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56143 struct dlfb_data *dev = fb_info->par;
56144 return snprintf(buf, PAGE_SIZE, "%u\n",
56145- atomic_read(&dev->bytes_sent));
56146+ atomic_read_unchecked(&dev->bytes_sent));
56147 }
56148
56149 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56150@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56151 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56152 struct dlfb_data *dev = fb_info->par;
56153 return snprintf(buf, PAGE_SIZE, "%u\n",
56154- atomic_read(&dev->cpu_kcycles_used));
56155+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56156 }
56157
56158 static ssize_t edid_show(
56159@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56160 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56161 struct dlfb_data *dev = fb_info->par;
56162
56163- atomic_set(&dev->bytes_rendered, 0);
56164- atomic_set(&dev->bytes_identical, 0);
56165- atomic_set(&dev->bytes_sent, 0);
56166- atomic_set(&dev->cpu_kcycles_used, 0);
56167+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56168+ atomic_set_unchecked(&dev->bytes_identical, 0);
56169+ atomic_set_unchecked(&dev->bytes_sent, 0);
56170+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56171
56172 return count;
56173 }
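
[editor's note] The udlfb hunk above converts the driver's performance counters from atomic_t to the PaX atomic_unchecked_t API. Under PAX_REFCOUNT, ordinary atomic_t arithmetic traps on overflow to defeat reference-count exploits; counters that merely accumulate statistics, such as bytes_sent and cpu_kcycles_used here, may legitimately wrap, so they opt out through the _unchecked variants. A minimal sketch using the API names this patch introduces; account_tx() and tx_total() are hypothetical:

    /* Sketch: a statistics counter exempted from the PAX_REFCOUNT
     * overflow trap via the _unchecked atomic variants. */
    static atomic_unchecked_t bytes_sent = ATOMIC_INIT(0);

    static void account_tx(unsigned int n)
    {
    	atomic_add_unchecked(n, &bytes_sent);	/* may wrap, by design */
    }

    static unsigned int tx_total(void)
    {
    	return atomic_read_unchecked(&bytes_sent);
    }
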
56174diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56175index 509d452..7c9d2de 100644
56176--- a/drivers/video/fbdev/uvesafb.c
56177+++ b/drivers/video/fbdev/uvesafb.c
56178@@ -19,6 +19,7 @@
56179 #include <linux/io.h>
56180 #include <linux/mutex.h>
56181 #include <linux/slab.h>
56182+#include <linux/moduleloader.h>
56183 #include <video/edid.h>
56184 #include <video/uvesafb.h>
56185 #ifdef CONFIG_X86
56186@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56187 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56188 par->pmi_setpal = par->ypan = 0;
56189 } else {
56190+
56191+#ifdef CONFIG_PAX_KERNEXEC
56192+#ifdef CONFIG_MODULES
56193+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56194+#endif
56195+ if (!par->pmi_code) {
56196+ par->pmi_setpal = par->ypan = 0;
56197+ return 0;
56198+ }
56199+#endif
56200+
56201 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56202 + task->t.regs.edi);
56203+
56204+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56205+ pax_open_kernel();
56206+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56207+ pax_close_kernel();
56208+
56209+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56210+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56211+#else
56212 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56213 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56214+#endif
56215+
56216 printk(KERN_INFO "uvesafb: protected mode interface info at "
56217 "%04x:%04x\n",
56218 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56219@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56220 par->ypan = ypan;
56221
56222 if (par->pmi_setpal || par->ypan) {
56223+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56224 if (__supported_pte_mask & _PAGE_NX) {
56225 par->pmi_setpal = par->ypan = 0;
56226 printk(KERN_WARNING "uvesafb: NX protection is active, "
56227 "better not use the PMI.\n");
56228- } else {
56229+ } else
56230+#endif
56231 uvesafb_vbe_getpmi(task, par);
56232- }
56233 }
56234 #else
56235 /* The protected mode interface is not available on non-x86. */
56236@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56237 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56238
56239 /* Disable blanking if the user requested so. */
56240- if (!blank)
56241- info->fbops->fb_blank = NULL;
56242+ if (!blank) {
56243+ pax_open_kernel();
56244+ *(void **)&info->fbops->fb_blank = NULL;
56245+ pax_close_kernel();
56246+ }
56247
56248 /*
56249 * Find out how much IO memory is required for the mode with
56250@@ -1525,8 +1552,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56251 info->flags = FBINFO_FLAG_DEFAULT |
56252 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56253
56254- if (!par->ypan)
56255- info->fbops->fb_pan_display = NULL;
56256+ if (!par->ypan) {
56257+ pax_open_kernel();
56258+ *(void **)&info->fbops->fb_pan_display = NULL;
56259+ pax_close_kernel();
56260+ }
56261 }
56262
56263 static void uvesafb_init_mtrr(struct fb_info *info)
56264@@ -1787,6 +1817,11 @@ out_mode:
56265 out:
56266 kfree(par->vbe_modes);
56267
56268+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56269+ if (par->pmi_code)
56270+ module_free_exec(NULL, par->pmi_code);
56271+#endif
56272+
56273 framebuffer_release(info);
56274 return err;
56275 }
56276@@ -1811,6 +1846,11 @@ static int uvesafb_remove(struct platform_device *dev)
56277 kfree(par->vbe_state_orig);
56278 kfree(par->vbe_state_saved);
56279
56280+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56281+ if (par->pmi_code)
56282+ module_free_exec(NULL, par->pmi_code);
56283+#endif
56284+
56285 framebuffer_release(info);
56286 }
56287 return 0;
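
[editor's note] The uvesafb change above, mirrored in the vesafb hunk below, deals with the x86 VESA protected-mode interface: under KERNEXEC the kernel's writable mapping is no longer executable, so the BIOS PMI blob cannot be called where it sits. The patch instead allocates an executable region with module_alloc_exec(), copies the blob into it inside an open/close window, and translates the entry pointers with ktva_ktla(). A condensed sketch of that flow, using only names the patch itself introduces; relocate_pmi() is a hypothetical wrapper:

    /* Condensed sketch of the KERNEXEC PMI relocation performed above;
     * pmi_base points at the BIOS blob, and pmi_base[1]/pmi_base[2]
     * hold the entry offsets, as in the original driver. */
    static void *relocate_pmi(u16 *pmi_base, unsigned long size,
    			  void **start, void **pal)
    {
    	void *pmi_code = module_alloc_exec(size);	/* RX region */
    	u8 *blob = pmi_code;

    	if (!pmi_code)
    		return NULL;

    	pax_open_kernel();
    	memcpy(pmi_code, pmi_base, size);	/* copy blob into RX region */
    	pax_close_kernel();

    	*start = ktva_ktla(blob + pmi_base[1]);
    	*pal   = ktva_ktla(blob + pmi_base[2]);
    	return pmi_code;
    }
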
56288diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56289index 6170e7f..dd63031 100644
56290--- a/drivers/video/fbdev/vesafb.c
56291+++ b/drivers/video/fbdev/vesafb.c
56292@@ -9,6 +9,7 @@
56293 */
56294
56295 #include <linux/module.h>
56296+#include <linux/moduleloader.h>
56297 #include <linux/kernel.h>
56298 #include <linux/errno.h>
56299 #include <linux/string.h>
56300@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56301 static int vram_total; /* Set total amount of memory */
56302 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56303 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56304-static void (*pmi_start)(void) __read_mostly;
56305-static void (*pmi_pal) (void) __read_mostly;
56306+static void (*pmi_start)(void) __read_only;
56307+static void (*pmi_pal) (void) __read_only;
56308 static int depth __read_mostly;
56309 static int vga_compat __read_mostly;
56310 /* --------------------------------------------------------------------- */
56311@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56312 unsigned int size_remap;
56313 unsigned int size_total;
56314 char *option = NULL;
56315+ void *pmi_code = NULL;
56316
56317 /* ignore error return of fb_get_options */
56318 fb_get_options("vesafb", &option);
56319@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56320 size_remap = size_total;
56321 vesafb_fix.smem_len = size_remap;
56322
56323-#ifndef __i386__
56324- screen_info.vesapm_seg = 0;
56325-#endif
56326-
56327 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56328 printk(KERN_WARNING
56329 "vesafb: cannot reserve video memory at 0x%lx\n",
56330@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56331 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56332 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56333
56334+#ifdef __i386__
56335+
56336+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56337+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56338+ if (!pmi_code)
56339+#elif !defined(CONFIG_PAX_KERNEXEC)
56340+ if (0)
56341+#endif
56342+
56343+#endif
56344+ screen_info.vesapm_seg = 0;
56345+
56346 if (screen_info.vesapm_seg) {
56347- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56348- screen_info.vesapm_seg,screen_info.vesapm_off);
56349+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56350+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56351 }
56352
56353 if (screen_info.vesapm_seg < 0xc000)
56354@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56355
56356 if (ypan || pmi_setpal) {
56357 unsigned short *pmi_base;
56358+
56359 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56360- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56361- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56362+
56363+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56364+ pax_open_kernel();
56365+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56366+#else
56367+ pmi_code = pmi_base;
56368+#endif
56369+
56370+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56371+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56372+
56373+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56374+ pmi_start = ktva_ktla(pmi_start);
56375+ pmi_pal = ktva_ktla(pmi_pal);
56376+ pax_close_kernel();
56377+#endif
56378+
56379 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56380 if (pmi_base[3]) {
56381 printk(KERN_INFO "vesafb: pmi: ports = ");
56382@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56383 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56384 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56385
56386- if (!ypan)
56387- info->fbops->fb_pan_display = NULL;
56388+ if (!ypan) {
56389+ pax_open_kernel();
56390+ *(void **)&info->fbops->fb_pan_display = NULL;
56391+ pax_close_kernel();
56392+ }
56393
56394 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56395 err = -ENOMEM;
56396@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56397 fb_info(info, "%s frame buffer device\n", info->fix.id);
56398 return 0;
56399 err:
56400+
56401+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56402+ module_free_exec(NULL, pmi_code);
56403+#endif
56404+
56405 if (info->screen_base)
56406 iounmap(info->screen_base);
56407 framebuffer_release(info);
56408diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56409index 88714ae..16c2e11 100644
56410--- a/drivers/video/fbdev/via/via_clock.h
56411+++ b/drivers/video/fbdev/via/via_clock.h
56412@@ -56,7 +56,7 @@ struct via_clock {
56413
56414 void (*set_engine_pll_state)(u8 state);
56415 void (*set_engine_pll)(struct via_pll_config config);
56416-};
56417+} __no_const;
56418
56419
56420 static inline u32 get_pll_internal_frequency(u32 ref_freq,
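
[editor's note] The via_clock.h hunk is a one-word change with a compiler plugin behind it: grsecurity's constify gcc plugin force-qualifies ops-like structures (those consisting of function pointers) as const, and via_clock is wired up at runtime per chip generation, so it must be annotated __no_const to stay writable. The shape of the annotation; via_clock_like is a hypothetical stand-in with simplified parameters:

    /* Hypothetical stand-in showing the __no_const opt-out: without
     * it, the constify plugin would make every instance read-only. */
    struct via_clock_like {
    	void (*set_engine_pll_state)(u8 state);
    	void (*set_engine_pll)(int config);
    } __no_const;	/* instances stay writable for runtime wiring */
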
56421diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56422index 3c14e43..2630570 100644
56423--- a/drivers/video/logo/logo_linux_clut224.ppm
56424+++ b/drivers/video/logo/logo_linux_clut224.ppm
56425@@ -2,1603 +2,1123 @@ P3
56426 # Standard 224-color Linux logo
56427 80 80
56428 255
[elided: the body of this hunk, which per the header replaces 1,603 lines of raw PPM RGB triplets with 1,123 new ones, repainting the stock 80x80, 224-color Linux boot logo; the numeric pixel rows are image payload with no reviewable content]
57603- 2 2 6 2 2 6 2 2 6 2 2 6
57604- 2 2 6 2 2 6 61 42 6 225 175 15
57605-238 190 10 236 186 11 112 100 78 42 42 42
57606- 14 14 14 0 0 0 0 0 0 0 0 0
57607- 0 0 0 0 0 0 0 0 0 0 0 0
57608- 0 0 0 0 0 0 0 0 0 0 0 0
57609- 0 0 0 0 0 0 0 0 0 0 0 0
57610- 0 0 0 0 0 0 0 0 0 6 6 6
57611- 22 22 22 54 54 54 154 122 46 213 154 11
57612-226 170 11 230 174 11 226 170 11 226 170 11
57613-236 178 12 242 186 14 246 190 14 246 190 14
57614-246 190 14 246 190 14 246 190 14 246 190 14
57615-241 196 14 184 144 12 10 10 10 2 2 6
57616- 6 6 6 116 116 116 242 242 242 253 253 253
57617-253 253 253 253 253 253 253 253 253 253 253 253
57618-253 253 253 253 253 253 253 253 253 253 253 253
57619-253 253 253 253 253 253 253 253 253 253 253 253
57620-253 253 253 253 253 253 253 253 253 253 253 253
57621-253 253 253 231 231 231 198 198 198 214 170 54
57622-236 178 12 236 178 12 210 150 10 137 92 6
57623- 18 14 6 2 2 6 2 2 6 2 2 6
57624- 6 6 6 70 47 6 200 144 11 236 178 12
57625-239 182 13 239 182 13 124 112 88 58 58 58
57626- 22 22 22 6 6 6 0 0 0 0 0 0
57627- 0 0 0 0 0 0 0 0 0 0 0 0
57628- 0 0 0 0 0 0 0 0 0 0 0 0
57629- 0 0 0 0 0 0 0 0 0 0 0 0
57630- 0 0 0 0 0 0 0 0 0 10 10 10
57631- 30 30 30 70 70 70 180 133 36 226 170 11
57632-239 182 13 242 186 14 242 186 14 246 186 14
57633-246 190 14 246 190 14 246 190 14 246 190 14
57634-246 190 14 246 190 14 246 190 14 246 190 14
57635-246 190 14 232 195 16 98 70 6 2 2 6
57636- 2 2 6 2 2 6 66 66 66 221 221 221
57637-253 253 253 253 253 253 253 253 253 253 253 253
57638-253 253 253 253 253 253 253 253 253 253 253 253
57639-253 253 253 253 253 253 253 253 253 253 253 253
57640-253 253 253 253 253 253 253 253 253 253 253 253
57641-253 253 253 206 206 206 198 198 198 214 166 58
57642-230 174 11 230 174 11 216 158 10 192 133 9
57643-163 110 8 116 81 8 102 78 10 116 81 8
57644-167 114 7 197 138 11 226 170 11 239 182 13
57645-242 186 14 242 186 14 162 146 94 78 78 78
57646- 34 34 34 14 14 14 6 6 6 0 0 0
57647- 0 0 0 0 0 0 0 0 0 0 0 0
57648- 0 0 0 0 0 0 0 0 0 0 0 0
57649- 0 0 0 0 0 0 0 0 0 0 0 0
57650- 0 0 0 0 0 0 0 0 0 6 6 6
57651- 30 30 30 78 78 78 190 142 34 226 170 11
57652-239 182 13 246 190 14 246 190 14 246 190 14
57653-246 190 14 246 190 14 246 190 14 246 190 14
57654-246 190 14 246 190 14 246 190 14 246 190 14
57655-246 190 14 241 196 14 203 166 17 22 18 6
57656- 2 2 6 2 2 6 2 2 6 38 38 38
57657-218 218 218 253 253 253 253 253 253 253 253 253
57658-253 253 253 253 253 253 253 253 253 253 253 253
57659-253 253 253 253 253 253 253 253 253 253 253 253
57660-253 253 253 253 253 253 253 253 253 253 253 253
57661-250 250 250 206 206 206 198 198 198 202 162 69
57662-226 170 11 236 178 12 224 166 10 210 150 10
57663-200 144 11 197 138 11 192 133 9 197 138 11
57664-210 150 10 226 170 11 242 186 14 246 190 14
57665-246 190 14 246 186 14 225 175 15 124 112 88
57666- 62 62 62 30 30 30 14 14 14 6 6 6
57667- 0 0 0 0 0 0 0 0 0 0 0 0
57668- 0 0 0 0 0 0 0 0 0 0 0 0
57669- 0 0 0 0 0 0 0 0 0 0 0 0
57670- 0 0 0 0 0 0 0 0 0 10 10 10
57671- 30 30 30 78 78 78 174 135 50 224 166 10
57672-239 182 13 246 190 14 246 190 14 246 190 14
57673-246 190 14 246 190 14 246 190 14 246 190 14
57674-246 190 14 246 190 14 246 190 14 246 190 14
57675-246 190 14 246 190 14 241 196 14 139 102 15
57676- 2 2 6 2 2 6 2 2 6 2 2 6
57677- 78 78 78 250 250 250 253 253 253 253 253 253
57678-253 253 253 253 253 253 253 253 253 253 253 253
57679-253 253 253 253 253 253 253 253 253 253 253 253
57680-253 253 253 253 253 253 253 253 253 253 253 253
57681-250 250 250 214 214 214 198 198 198 190 150 46
57682-219 162 10 236 178 12 234 174 13 224 166 10
57683-216 158 10 213 154 11 213 154 11 216 158 10
57684-226 170 11 239 182 13 246 190 14 246 190 14
57685-246 190 14 246 190 14 242 186 14 206 162 42
57686-101 101 101 58 58 58 30 30 30 14 14 14
57687- 6 6 6 0 0 0 0 0 0 0 0 0
57688- 0 0 0 0 0 0 0 0 0 0 0 0
57689- 0 0 0 0 0 0 0 0 0 0 0 0
57690- 0 0 0 0 0 0 0 0 0 10 10 10
57691- 30 30 30 74 74 74 174 135 50 216 158 10
57692-236 178 12 246 190 14 246 190 14 246 190 14
57693-246 190 14 246 190 14 246 190 14 246 190 14
57694-246 190 14 246 190 14 246 190 14 246 190 14
57695-246 190 14 246 190 14 241 196 14 226 184 13
57696- 61 42 6 2 2 6 2 2 6 2 2 6
57697- 22 22 22 238 238 238 253 253 253 253 253 253
57698-253 253 253 253 253 253 253 253 253 253 253 253
57699-253 253 253 253 253 253 253 253 253 253 253 253
57700-253 253 253 253 253 253 253 253 253 253 253 253
57701-253 253 253 226 226 226 187 187 187 180 133 36
57702-216 158 10 236 178 12 239 182 13 236 178 12
57703-230 174 11 226 170 11 226 170 11 230 174 11
57704-236 178 12 242 186 14 246 190 14 246 190 14
57705-246 190 14 246 190 14 246 186 14 239 182 13
57706-206 162 42 106 106 106 66 66 66 34 34 34
57707- 14 14 14 6 6 6 0 0 0 0 0 0
57708- 0 0 0 0 0 0 0 0 0 0 0 0
57709- 0 0 0 0 0 0 0 0 0 0 0 0
57710- 0 0 0 0 0 0 0 0 0 6 6 6
57711- 26 26 26 70 70 70 163 133 67 213 154 11
57712-236 178 12 246 190 14 246 190 14 246 190 14
57713-246 190 14 246 190 14 246 190 14 246 190 14
57714-246 190 14 246 190 14 246 190 14 246 190 14
57715-246 190 14 246 190 14 246 190 14 241 196 14
57716-190 146 13 18 14 6 2 2 6 2 2 6
57717- 46 46 46 246 246 246 253 253 253 253 253 253
57718-253 253 253 253 253 253 253 253 253 253 253 253
57719-253 253 253 253 253 253 253 253 253 253 253 253
57720-253 253 253 253 253 253 253 253 253 253 253 253
57721-253 253 253 221 221 221 86 86 86 156 107 11
57722-216 158 10 236 178 12 242 186 14 246 186 14
57723-242 186 14 239 182 13 239 182 13 242 186 14
57724-242 186 14 246 186 14 246 190 14 246 190 14
57725-246 190 14 246 190 14 246 190 14 246 190 14
57726-242 186 14 225 175 15 142 122 72 66 66 66
57727- 30 30 30 10 10 10 0 0 0 0 0 0
57728- 0 0 0 0 0 0 0 0 0 0 0 0
57729- 0 0 0 0 0 0 0 0 0 0 0 0
57730- 0 0 0 0 0 0 0 0 0 6 6 6
57731- 26 26 26 70 70 70 163 133 67 210 150 10
57732-236 178 12 246 190 14 246 190 14 246 190 14
57733-246 190 14 246 190 14 246 190 14 246 190 14
57734-246 190 14 246 190 14 246 190 14 246 190 14
57735-246 190 14 246 190 14 246 190 14 246 190 14
57736-232 195 16 121 92 8 34 34 34 106 106 106
57737-221 221 221 253 253 253 253 253 253 253 253 253
57738-253 253 253 253 253 253 253 253 253 253 253 253
57739-253 253 253 253 253 253 253 253 253 253 253 253
57740-253 253 253 253 253 253 253 253 253 253 253 253
57741-242 242 242 82 82 82 18 14 6 163 110 8
57742-216 158 10 236 178 12 242 186 14 246 190 14
57743-246 190 14 246 190 14 246 190 14 246 190 14
57744-246 190 14 246 190 14 246 190 14 246 190 14
57745-246 190 14 246 190 14 246 190 14 246 190 14
57746-246 190 14 246 190 14 242 186 14 163 133 67
57747- 46 46 46 18 18 18 6 6 6 0 0 0
57748- 0 0 0 0 0 0 0 0 0 0 0 0
57749- 0 0 0 0 0 0 0 0 0 0 0 0
57750- 0 0 0 0 0 0 0 0 0 10 10 10
57751- 30 30 30 78 78 78 163 133 67 210 150 10
57752-236 178 12 246 186 14 246 190 14 246 190 14
57753-246 190 14 246 190 14 246 190 14 246 190 14
57754-246 190 14 246 190 14 246 190 14 246 190 14
57755-246 190 14 246 190 14 246 190 14 246 190 14
57756-241 196 14 215 174 15 190 178 144 253 253 253
57757-253 253 253 253 253 253 253 253 253 253 253 253
57758-253 253 253 253 253 253 253 253 253 253 253 253
57759-253 253 253 253 253 253 253 253 253 253 253 253
57760-253 253 253 253 253 253 253 253 253 218 218 218
57761- 58 58 58 2 2 6 22 18 6 167 114 7
57762-216 158 10 236 178 12 246 186 14 246 190 14
57763-246 190 14 246 190 14 246 190 14 246 190 14
57764-246 190 14 246 190 14 246 190 14 246 190 14
57765-246 190 14 246 190 14 246 190 14 246 190 14
57766-246 190 14 246 186 14 242 186 14 190 150 46
57767- 54 54 54 22 22 22 6 6 6 0 0 0
57768- 0 0 0 0 0 0 0 0 0 0 0 0
57769- 0 0 0 0 0 0 0 0 0 0 0 0
57770- 0 0 0 0 0 0 0 0 0 14 14 14
57771- 38 38 38 86 86 86 180 133 36 213 154 11
57772-236 178 12 246 186 14 246 190 14 246 190 14
57773-246 190 14 246 190 14 246 190 14 246 190 14
57774-246 190 14 246 190 14 246 190 14 246 190 14
57775-246 190 14 246 190 14 246 190 14 246 190 14
57776-246 190 14 232 195 16 190 146 13 214 214 214
57777-253 253 253 253 253 253 253 253 253 253 253 253
57778-253 253 253 253 253 253 253 253 253 253 253 253
57779-253 253 253 253 253 253 253 253 253 253 253 253
57780-253 253 253 250 250 250 170 170 170 26 26 26
57781- 2 2 6 2 2 6 37 26 9 163 110 8
57782-219 162 10 239 182 13 246 186 14 246 190 14
57783-246 190 14 246 190 14 246 190 14 246 190 14
57784-246 190 14 246 190 14 246 190 14 246 190 14
57785-246 190 14 246 190 14 246 190 14 246 190 14
57786-246 186 14 236 178 12 224 166 10 142 122 72
57787- 46 46 46 18 18 18 6 6 6 0 0 0
57788- 0 0 0 0 0 0 0 0 0 0 0 0
57789- 0 0 0 0 0 0 0 0 0 0 0 0
57790- 0 0 0 0 0 0 6 6 6 18 18 18
57791- 50 50 50 109 106 95 192 133 9 224 166 10
57792-242 186 14 246 190 14 246 190 14 246 190 14
57793-246 190 14 246 190 14 246 190 14 246 190 14
57794-246 190 14 246 190 14 246 190 14 246 190 14
57795-246 190 14 246 190 14 246 190 14 246 190 14
57796-242 186 14 226 184 13 210 162 10 142 110 46
57797-226 226 226 253 253 253 253 253 253 253 253 253
57798-253 253 253 253 253 253 253 253 253 253 253 253
57799-253 253 253 253 253 253 253 253 253 253 253 253
57800-198 198 198 66 66 66 2 2 6 2 2 6
57801- 2 2 6 2 2 6 50 34 6 156 107 11
57802-219 162 10 239 182 13 246 186 14 246 190 14
57803-246 190 14 246 190 14 246 190 14 246 190 14
57804-246 190 14 246 190 14 246 190 14 246 190 14
57805-246 190 14 246 190 14 246 190 14 242 186 14
57806-234 174 13 213 154 11 154 122 46 66 66 66
57807- 30 30 30 10 10 10 0 0 0 0 0 0
57808- 0 0 0 0 0 0 0 0 0 0 0 0
57809- 0 0 0 0 0 0 0 0 0 0 0 0
57810- 0 0 0 0 0 0 6 6 6 22 22 22
57811- 58 58 58 154 121 60 206 145 10 234 174 13
57812-242 186 14 246 186 14 246 190 14 246 190 14
57813-246 190 14 246 190 14 246 190 14 246 190 14
57814-246 190 14 246 190 14 246 190 14 246 190 14
57815-246 190 14 246 190 14 246 190 14 246 190 14
57816-246 186 14 236 178 12 210 162 10 163 110 8
57817- 61 42 6 138 138 138 218 218 218 250 250 250
57818-253 253 253 253 253 253 253 253 253 250 250 250
57819-242 242 242 210 210 210 144 144 144 66 66 66
57820- 6 6 6 2 2 6 2 2 6 2 2 6
57821- 2 2 6 2 2 6 61 42 6 163 110 8
57822-216 158 10 236 178 12 246 190 14 246 190 14
57823-246 190 14 246 190 14 246 190 14 246 190 14
57824-246 190 14 246 190 14 246 190 14 246 190 14
57825-246 190 14 239 182 13 230 174 11 216 158 10
57826-190 142 34 124 112 88 70 70 70 38 38 38
57827- 18 18 18 6 6 6 0 0 0 0 0 0
57828- 0 0 0 0 0 0 0 0 0 0 0 0
57829- 0 0 0 0 0 0 0 0 0 0 0 0
57830- 0 0 0 0 0 0 6 6 6 22 22 22
57831- 62 62 62 168 124 44 206 145 10 224 166 10
57832-236 178 12 239 182 13 242 186 14 242 186 14
57833-246 186 14 246 190 14 246 190 14 246 190 14
57834-246 190 14 246 190 14 246 190 14 246 190 14
57835-246 190 14 246 190 14 246 190 14 246 190 14
57836-246 190 14 236 178 12 216 158 10 175 118 6
57837- 80 54 7 2 2 6 6 6 6 30 30 30
57838- 54 54 54 62 62 62 50 50 50 38 38 38
57839- 14 14 14 2 2 6 2 2 6 2 2 6
57840- 2 2 6 2 2 6 2 2 6 2 2 6
57841- 2 2 6 6 6 6 80 54 7 167 114 7
57842-213 154 11 236 178 12 246 190 14 246 190 14
57843-246 190 14 246 190 14 246 190 14 246 190 14
57844-246 190 14 242 186 14 239 182 13 239 182 13
57845-230 174 11 210 150 10 174 135 50 124 112 88
57846- 82 82 82 54 54 54 34 34 34 18 18 18
57847- 6 6 6 0 0 0 0 0 0 0 0 0
57848- 0 0 0 0 0 0 0 0 0 0 0 0
57849- 0 0 0 0 0 0 0 0 0 0 0 0
57850- 0 0 0 0 0 0 6 6 6 18 18 18
57851- 50 50 50 158 118 36 192 133 9 200 144 11
57852-216 158 10 219 162 10 224 166 10 226 170 11
57853-230 174 11 236 178 12 239 182 13 239 182 13
57854-242 186 14 246 186 14 246 190 14 246 190 14
57855-246 190 14 246 190 14 246 190 14 246 190 14
57856-246 186 14 230 174 11 210 150 10 163 110 8
57857-104 69 6 10 10 10 2 2 6 2 2 6
57858- 2 2 6 2 2 6 2 2 6 2 2 6
57859- 2 2 6 2 2 6 2 2 6 2 2 6
57860- 2 2 6 2 2 6 2 2 6 2 2 6
57861- 2 2 6 6 6 6 91 60 6 167 114 7
57862-206 145 10 230 174 11 242 186 14 246 190 14
57863-246 190 14 246 190 14 246 186 14 242 186 14
57864-239 182 13 230 174 11 224 166 10 213 154 11
57865-180 133 36 124 112 88 86 86 86 58 58 58
57866- 38 38 38 22 22 22 10 10 10 6 6 6
57867- 0 0 0 0 0 0 0 0 0 0 0 0
57868- 0 0 0 0 0 0 0 0 0 0 0 0
57869- 0 0 0 0 0 0 0 0 0 0 0 0
57870- 0 0 0 0 0 0 0 0 0 14 14 14
57871- 34 34 34 70 70 70 138 110 50 158 118 36
57872-167 114 7 180 123 7 192 133 9 197 138 11
57873-200 144 11 206 145 10 213 154 11 219 162 10
57874-224 166 10 230 174 11 239 182 13 242 186 14
57875-246 186 14 246 186 14 246 186 14 246 186 14
57876-239 182 13 216 158 10 185 133 11 152 99 6
57877-104 69 6 18 14 6 2 2 6 2 2 6
57878- 2 2 6 2 2 6 2 2 6 2 2 6
57879- 2 2 6 2 2 6 2 2 6 2 2 6
57880- 2 2 6 2 2 6 2 2 6 2 2 6
57881- 2 2 6 6 6 6 80 54 7 152 99 6
57882-192 133 9 219 162 10 236 178 12 239 182 13
57883-246 186 14 242 186 14 239 182 13 236 178 12
57884-224 166 10 206 145 10 192 133 9 154 121 60
57885- 94 94 94 62 62 62 42 42 42 22 22 22
57886- 14 14 14 6 6 6 0 0 0 0 0 0
57887- 0 0 0 0 0 0 0 0 0 0 0 0
57888- 0 0 0 0 0 0 0 0 0 0 0 0
57889- 0 0 0 0 0 0 0 0 0 0 0 0
57890- 0 0 0 0 0 0 0 0 0 6 6 6
57891- 18 18 18 34 34 34 58 58 58 78 78 78
57892-101 98 89 124 112 88 142 110 46 156 107 11
57893-163 110 8 167 114 7 175 118 6 180 123 7
57894-185 133 11 197 138 11 210 150 10 219 162 10
57895-226 170 11 236 178 12 236 178 12 234 174 13
57896-219 162 10 197 138 11 163 110 8 130 83 6
57897- 91 60 6 10 10 10 2 2 6 2 2 6
57898- 18 18 18 38 38 38 38 38 38 38 38 38
57899- 38 38 38 38 38 38 38 38 38 38 38 38
57900- 38 38 38 38 38 38 26 26 26 2 2 6
57901- 2 2 6 6 6 6 70 47 6 137 92 6
57902-175 118 6 200 144 11 219 162 10 230 174 11
57903-234 174 13 230 174 11 219 162 10 210 150 10
57904-192 133 9 163 110 8 124 112 88 82 82 82
57905- 50 50 50 30 30 30 14 14 14 6 6 6
57906- 0 0 0 0 0 0 0 0 0 0 0 0
57907- 0 0 0 0 0 0 0 0 0 0 0 0
57908- 0 0 0 0 0 0 0 0 0 0 0 0
57909- 0 0 0 0 0 0 0 0 0 0 0 0
57910- 0 0 0 0 0 0 0 0 0 0 0 0
57911- 6 6 6 14 14 14 22 22 22 34 34 34
57912- 42 42 42 58 58 58 74 74 74 86 86 86
57913-101 98 89 122 102 70 130 98 46 121 87 25
57914-137 92 6 152 99 6 163 110 8 180 123 7
57915-185 133 11 197 138 11 206 145 10 200 144 11
57916-180 123 7 156 107 11 130 83 6 104 69 6
57917- 50 34 6 54 54 54 110 110 110 101 98 89
57918- 86 86 86 82 82 82 78 78 78 78 78 78
57919- 78 78 78 78 78 78 78 78 78 78 78 78
57920- 78 78 78 82 82 82 86 86 86 94 94 94
57921-106 106 106 101 101 101 86 66 34 124 80 6
57922-156 107 11 180 123 7 192 133 9 200 144 11
57923-206 145 10 200 144 11 192 133 9 175 118 6
57924-139 102 15 109 106 95 70 70 70 42 42 42
57925- 22 22 22 10 10 10 0 0 0 0 0 0
57926- 0 0 0 0 0 0 0 0 0 0 0 0
57927- 0 0 0 0 0 0 0 0 0 0 0 0
57928- 0 0 0 0 0 0 0 0 0 0 0 0
57929- 0 0 0 0 0 0 0 0 0 0 0 0
57930- 0 0 0 0 0 0 0 0 0 0 0 0
57931- 0 0 0 0 0 0 6 6 6 10 10 10
57932- 14 14 14 22 22 22 30 30 30 38 38 38
57933- 50 50 50 62 62 62 74 74 74 90 90 90
57934-101 98 89 112 100 78 121 87 25 124 80 6
57935-137 92 6 152 99 6 152 99 6 152 99 6
57936-138 86 6 124 80 6 98 70 6 86 66 30
57937-101 98 89 82 82 82 58 58 58 46 46 46
57938- 38 38 38 34 34 34 34 34 34 34 34 34
57939- 34 34 34 34 34 34 34 34 34 34 34 34
57940- 34 34 34 34 34 34 38 38 38 42 42 42
57941- 54 54 54 82 82 82 94 86 76 91 60 6
57942-134 86 6 156 107 11 167 114 7 175 118 6
57943-175 118 6 167 114 7 152 99 6 121 87 25
57944-101 98 89 62 62 62 34 34 34 18 18 18
57945- 6 6 6 0 0 0 0 0 0 0 0 0
57946- 0 0 0 0 0 0 0 0 0 0 0 0
57947- 0 0 0 0 0 0 0 0 0 0 0 0
57948- 0 0 0 0 0 0 0 0 0 0 0 0
57949- 0 0 0 0 0 0 0 0 0 0 0 0
57950- 0 0 0 0 0 0 0 0 0 0 0 0
57951- 0 0 0 0 0 0 0 0 0 0 0 0
57952- 0 0 0 6 6 6 6 6 6 10 10 10
57953- 18 18 18 22 22 22 30 30 30 42 42 42
57954- 50 50 50 66 66 66 86 86 86 101 98 89
57955-106 86 58 98 70 6 104 69 6 104 69 6
57956-104 69 6 91 60 6 82 62 34 90 90 90
57957- 62 62 62 38 38 38 22 22 22 14 14 14
57958- 10 10 10 10 10 10 10 10 10 10 10 10
57959- 10 10 10 10 10 10 6 6 6 10 10 10
57960- 10 10 10 10 10 10 10 10 10 14 14 14
57961- 22 22 22 42 42 42 70 70 70 89 81 66
57962- 80 54 7 104 69 6 124 80 6 137 92 6
57963-134 86 6 116 81 8 100 82 52 86 86 86
57964- 58 58 58 30 30 30 14 14 14 6 6 6
57965- 0 0 0 0 0 0 0 0 0 0 0 0
57966- 0 0 0 0 0 0 0 0 0 0 0 0
57967- 0 0 0 0 0 0 0 0 0 0 0 0
57968- 0 0 0 0 0 0 0 0 0 0 0 0
57969- 0 0 0 0 0 0 0 0 0 0 0 0
57970- 0 0 0 0 0 0 0 0 0 0 0 0
57971- 0 0 0 0 0 0 0 0 0 0 0 0
57972- 0 0 0 0 0 0 0 0 0 0 0 0
57973- 0 0 0 6 6 6 10 10 10 14 14 14
57974- 18 18 18 26 26 26 38 38 38 54 54 54
57975- 70 70 70 86 86 86 94 86 76 89 81 66
57976- 89 81 66 86 86 86 74 74 74 50 50 50
57977- 30 30 30 14 14 14 6 6 6 0 0 0
57978- 0 0 0 0 0 0 0 0 0 0 0 0
57979- 0 0 0 0 0 0 0 0 0 0 0 0
57980- 0 0 0 0 0 0 0 0 0 0 0 0
57981- 6 6 6 18 18 18 34 34 34 58 58 58
57982- 82 82 82 89 81 66 89 81 66 89 81 66
57983- 94 86 66 94 86 76 74 74 74 50 50 50
57984- 26 26 26 14 14 14 6 6 6 0 0 0
57985- 0 0 0 0 0 0 0 0 0 0 0 0
57986- 0 0 0 0 0 0 0 0 0 0 0 0
57987- 0 0 0 0 0 0 0 0 0 0 0 0
57988- 0 0 0 0 0 0 0 0 0 0 0 0
57989- 0 0 0 0 0 0 0 0 0 0 0 0
57990- 0 0 0 0 0 0 0 0 0 0 0 0
57991- 0 0 0 0 0 0 0 0 0 0 0 0
57992- 0 0 0 0 0 0 0 0 0 0 0 0
57993- 0 0 0 0 0 0 0 0 0 0 0 0
57994- 6 6 6 6 6 6 14 14 14 18 18 18
57995- 30 30 30 38 38 38 46 46 46 54 54 54
57996- 50 50 50 42 42 42 30 30 30 18 18 18
57997- 10 10 10 0 0 0 0 0 0 0 0 0
57998- 0 0 0 0 0 0 0 0 0 0 0 0
57999- 0 0 0 0 0 0 0 0 0 0 0 0
58000- 0 0 0 0 0 0 0 0 0 0 0 0
58001- 0 0 0 6 6 6 14 14 14 26 26 26
58002- 38 38 38 50 50 50 58 58 58 58 58 58
58003- 54 54 54 42 42 42 30 30 30 18 18 18
58004- 10 10 10 0 0 0 0 0 0 0 0 0
58005- 0 0 0 0 0 0 0 0 0 0 0 0
58006- 0 0 0 0 0 0 0 0 0 0 0 0
58007- 0 0 0 0 0 0 0 0 0 0 0 0
58008- 0 0 0 0 0 0 0 0 0 0 0 0
58009- 0 0 0 0 0 0 0 0 0 0 0 0
58010- 0 0 0 0 0 0 0 0 0 0 0 0
58011- 0 0 0 0 0 0 0 0 0 0 0 0
58012- 0 0 0 0 0 0 0 0 0 0 0 0
58013- 0 0 0 0 0 0 0 0 0 0 0 0
58014- 0 0 0 0 0 0 0 0 0 6 6 6
58015- 6 6 6 10 10 10 14 14 14 18 18 18
58016- 18 18 18 14 14 14 10 10 10 6 6 6
58017- 0 0 0 0 0 0 0 0 0 0 0 0
58018- 0 0 0 0 0 0 0 0 0 0 0 0
58019- 0 0 0 0 0 0 0 0 0 0 0 0
58020- 0 0 0 0 0 0 0 0 0 0 0 0
58021- 0 0 0 0 0 0 0 0 0 6 6 6
58022- 14 14 14 18 18 18 22 22 22 22 22 22
58023- 18 18 18 14 14 14 10 10 10 6 6 6
58024- 0 0 0 0 0 0 0 0 0 0 0 0
58025- 0 0 0 0 0 0 0 0 0 0 0 0
58026- 0 0 0 0 0 0 0 0 0 0 0 0
58027- 0 0 0 0 0 0 0 0 0 0 0 0
58028- 0 0 0 0 0 0 0 0 0 0 0 0
58029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58042+4 4 4 4 4 4
58043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58056+4 4 4 4 4 4
58057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58070+4 4 4 4 4 4
58071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58084+4 4 4 4 4 4
58085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58098+4 4 4 4 4 4
58099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58112+4 4 4 4 4 4
58113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58117+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58118+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58122+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58123+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58124+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58126+4 4 4 4 4 4
58127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58131+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58132+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58133+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58136+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58137+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58138+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58139+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58140+4 4 4 4 4 4
58141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58145+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58146+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58147+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58150+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58151+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58152+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58153+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58154+4 4 4 4 4 4
58155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58158+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58159+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58160+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58161+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58163+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58164+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58165+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58166+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58167+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58168+4 4 4 4 4 4
58169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58172+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58173+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58174+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58175+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58176+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58177+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58178+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58179+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58180+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58181+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58182+4 4 4 4 4 4
58183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58186+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58187+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58188+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58189+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58190+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58191+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58192+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58193+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58194+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58195+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58196+4 4 4 4 4 4
58197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58199+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58200+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58201+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58202+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58203+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58204+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58205+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58206+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58207+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58208+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58209+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58210+4 4 4 4 4 4
58211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58213+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58214+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58215+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58216+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58217+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58218+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58219+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58220+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58221+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58222+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58223+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58224+4 4 4 4 4 4
58225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58227+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58228+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58229+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58230+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58231+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58232+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58233+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58234+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58235+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58236+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58237+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58238+4 4 4 4 4 4
58239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58241+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58242+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58243+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58244+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58245+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58246+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58247+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58248+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58249+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58250+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58251+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58252+4 4 4 4 4 4
58253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58254+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58255+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58256+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58257+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58258+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58259+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58260+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58261+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58262+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58263+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58264+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58265+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58266+4 4 4 4 4 4
58267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58268+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58269+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58270+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58271+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58272+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58273+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58274+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58275+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58276+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58277+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58278+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58279+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58280+0 0 0 4 4 4
58281+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58282+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58283+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58284+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58285+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58286+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58287+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58288+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58289+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58290+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58291+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58292+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58293+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58294+2 0 0 0 0 0
58295+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58296+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58297+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58298+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58299+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58300+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58301+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58302+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58303+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58304+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58305+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58306+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58307+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58308+37 38 37 0 0 0
58309+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58310+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58311+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58312+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58313+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58314+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58315+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58316+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58317+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58318+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58319+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58320+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58321+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58322+85 115 134 4 0 0
58323+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58324+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58325+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58326+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58327+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58328+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58329+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58330+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58331+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58332+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58333+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58334+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58335+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58336+60 73 81 4 0 0
58337+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58338+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58339+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58340+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58341+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58342+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58343+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58344+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58345+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58346+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58347+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58348+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58349+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58350+16 19 21 4 0 0
58351+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58352+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58353+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58354+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58355+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58356+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58357+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58358+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58359+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58360+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58361+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58362+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58363+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58364+4 0 0 4 3 3
58365+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58366+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58367+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58369+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58370+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58371+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58372+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58373+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58374+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58375+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58376+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58377+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58378+3 2 2 4 4 4
58379+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58380+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58381+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58382+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58383+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58384+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58385+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58386+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58387+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58388+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58389+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58390+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58391+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58392+4 4 4 4 4 4
58393+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58394+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58395+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58396+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58397+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58398+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58399+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58400+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58401+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58402+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58403+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58404+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58405+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58406+4 4 4 4 4 4
58407+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58408+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58409+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58410+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58411+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58412+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58413+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58414+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58415+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58416+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58417+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58418+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58419+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58420+5 5 5 5 5 5
58421+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58422+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58423+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58424+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58425+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58426+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58427+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58428+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58429+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58430+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58431+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58432+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58433+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58434+5 5 5 4 4 4
58435+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58436+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58437+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58438+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58439+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58440+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58441+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58442+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58443+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58444+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58445+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58446+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58448+4 4 4 4 4 4
58449+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58450+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58451+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58452+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
58453+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
58454+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58455+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58456+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
58457+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
58458+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
58459+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
58460+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
58461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58462+4 4 4 4 4 4
58463+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
58464+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
58465+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
58466+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
58467+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58468+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
58469+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
58470+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
58471+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
58472+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
58473+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
58474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58476+4 4 4 4 4 4
58477+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
58478+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
58479+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
58480+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
58481+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58482+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58483+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58484+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
58485+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
58486+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
58487+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
58488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58490+4 4 4 4 4 4
58491+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
58492+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
58493+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
58494+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
58495+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58496+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
58497+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58498+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
58499+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
58500+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
58501+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58504+4 4 4 4 4 4
58505+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
58506+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
58507+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
58508+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
58509+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58510+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
58511+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
58512+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
58513+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
58514+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
58515+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
58516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58518+4 4 4 4 4 4
58519+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
58520+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
58521+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
58522+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
58523+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58524+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
58525+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
58526+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
58527+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
58528+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
58529+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
58530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58532+4 4 4 4 4 4
58533+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
58534+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
58535+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
58536+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58537+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
58538+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
58539+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
58540+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
58541+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
58542+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
58543+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58546+4 4 4 4 4 4
58547+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
58548+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
58549+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
58550+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58551+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58552+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
58553+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
58554+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
58555+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
58556+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
58557+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58560+4 4 4 4 4 4
58561+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
58562+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
58563+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58564+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58565+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58566+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
58567+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
58568+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
58569+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
58570+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
58571+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58574+4 4 4 4 4 4
58575+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
58576+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
58577+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58578+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58579+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58580+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
58581+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
58582+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
58583+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58584+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58585+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58588+4 4 4 4 4 4
58589+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58590+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
58591+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58592+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
58593+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
58594+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
58595+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
58596+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
58597+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58598+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58599+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58602+4 4 4 4 4 4
58603+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58604+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
58605+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58606+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
58607+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58608+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
58609+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
58610+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
58611+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58612+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58613+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58616+4 4 4 4 4 4
58617+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
58618+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
58619+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58620+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
58621+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
[remainder of the PPM image payload: roughly 500 more "+" lines of decimal RGB pixel triples added by the patch, omitted]
59149diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59150index fef20db..d28b1ab 100644
59151--- a/drivers/xen/xenfs/xenstored.c
59152+++ b/drivers/xen/xenfs/xenstored.c
59153@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59154 static int xsd_kva_open(struct inode *inode, struct file *file)
59155 {
59156 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59157+#ifdef CONFIG_GRKERNSEC_HIDESYM
59158+ NULL);
59159+#else
59160 xen_store_interface);
59161+#endif
59162+
59163 if (!file->private_data)
59164 return -ENOMEM;
59165 return 0;
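The hunk above is the GRKERNSEC_HIDESYM pattern that recurs throughout this patch: wherever a kernel virtual address would be formatted into a buffer userland can read back, the real pointer is compiled out and NULL is printed instead. A minimal sketch of the idea, assuming a HIDESYM-style config symbol; the helper name is illustrative, not part of the patch:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    /* Sketch: format a kernel address for userland consumption,
     * hiding it when the hardened config is enabled. kasprintf()
     * allocates the string with GFP_KERNEL, as xsd_kva_open() does. */
    static char *format_kva_for_user(const void *kva)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
            return kasprintf(GFP_KERNEL, "0x%p", NULL); /* constant, leaks no layout */
    #else
            return kasprintf(GFP_KERNEL, "0x%p", kva);  /* real mapping address */
    #endif
    }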
59166diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
59167index cc1cfae..41158ad 100644
59168--- a/fs/9p/vfs_addr.c
59169+++ b/fs/9p/vfs_addr.c
59170@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
59171
59172 retval = v9fs_file_write_internal(inode,
59173 v9inode->writeback_fid,
59174- (__force const char __user *)buffer,
59175+ (const char __force_user *)buffer,
59176 len, &offset, 0);
59177 if (retval > 0)
59178 retval = 0;
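The 9p hunk above swaps "(__force const char __user *)" for "(const char __force_user *)": __force_user is the patch's combined annotation (__force __user) for the cases where a kernel buffer is deliberately pushed through a __user-typed interface under set_fs(KERNEL_DS). A sketch of the idiom; the fallback #define is an assumption so the example builds outside the patch:

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    #ifndef __force_user
    #define __force_user __force __user     /* fallback outside the patch */
    #endif

    /* Sketch: writing a kernel buffer through the __user-typed VFS
     * interface, the same pattern v9fs_vfs_writepage_locked() uses. */
    static ssize_t write_kernel_buf(struct file *filp, const char *kbuf,
                                    size_t len, loff_t *pos)
    {
            mm_segment_t old_fs = get_fs();
            ssize_t ret;

            set_fs(KERNEL_DS);      /* let address checks accept kernel pointers */
            ret = vfs_write(filp, (const char __force_user *)kbuf, len, pos);
            set_fs(old_fs);
            return ret;
    }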
59179diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
59180index 7fa4f7a..a7ebf8c 100644
59181--- a/fs/9p/vfs_inode.c
59182+++ b/fs/9p/vfs_inode.c
59183@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59184 void
59185 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
59186 {
59187- char *s = nd_get_link(nd);
59188+ const char *s = nd_get_link(nd);
59189
59190 p9_debug(P9_DEBUG_VFS, " %s %s\n",
59191 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
59192diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
59193index 370b24c..ff0be7b 100644
59194--- a/fs/Kconfig.binfmt
59195+++ b/fs/Kconfig.binfmt
59196@@ -103,7 +103,7 @@ config HAVE_AOUT
59197
59198 config BINFMT_AOUT
59199 tristate "Kernel support for a.out and ECOFF binaries"
59200- depends on HAVE_AOUT
59201+ depends on HAVE_AOUT && BROKEN
59202 ---help---
59203 A.out (Assembler.OUTput) is a set of formats for libraries and
59204 executables used in the earliest versions of UNIX. Linux used
59205diff --git a/fs/afs/inode.c b/fs/afs/inode.c
59206index 2946712..f737435 100644
59207--- a/fs/afs/inode.c
59208+++ b/fs/afs/inode.c
59209@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59210 struct afs_vnode *vnode;
59211 struct super_block *sb;
59212 struct inode *inode;
59213- static atomic_t afs_autocell_ino;
59214+ static atomic_unchecked_t afs_autocell_ino;
59215
59216 _enter("{%x:%u},%*.*s,",
59217 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
59218@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59219 data.fid.unique = 0;
59220 data.fid.vnode = 0;
59221
59222- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
59223+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
59224 afs_iget5_autocell_test, afs_iget5_set,
59225 &data);
59226 if (!inode) {
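atomic_unchecked_t is the PaX counterpart of atomic_t for counters that are allowed to wrap: with PAX_REFCOUNT enabled, plain atomic_t arithmetic traps on overflow, so ID generators such as afs_autocell_ino above are converted to the unchecked variant. A minimal usage sketch; the type and the _unchecked operations exist only with this patch applied:

    #include <linux/atomic.h>

    /* Sketch: an inode-number generator that may legitimately wrap.
     * Under PAX_REFCOUNT, atomic_inc_return() on a plain atomic_t
     * would be overflow-checked; _unchecked opts this counter out. */
    static atomic_unchecked_t example_ino = ATOMIC_INIT(0);

    static unsigned long next_example_ino(void)
    {
            return (unsigned long)atomic_inc_return_unchecked(&example_ino);
    }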
59227diff --git a/fs/aio.c b/fs/aio.c
59228index 1c9c5f0..94455bc 100644
59229--- a/fs/aio.c
59230+++ b/fs/aio.c
59231@@ -141,6 +141,7 @@ struct kioctx {
59232
59233 struct {
59234 unsigned tail;
59235+ unsigned completed_events;
59236 spinlock_t completion_lock;
59237 } ____cacheline_aligned_in_smp;
59238
59239@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
59240 size += sizeof(struct io_event) * nr_events;
59241
59242 nr_pages = PFN_UP(size);
59243- if (nr_pages < 0)
59244+ if (nr_pages <= 0)
59245 return -EINVAL;
59246
59247 file = aio_private_file(ctx, nr_pages);
59248@@ -880,6 +881,68 @@ out:
59249 return ret;
59250 }
59251
59252+/* refill_reqs_available
59253+ * Updates the reqs_available reference counts used for tracking the
59254+ * number of free slots in the completion ring. This can be called
59255+ * from aio_complete() (to optimistically update reqs_available) or
59256+ * from aio_get_req() (the "we're out of events" case). It must be
59257+ * called holding ctx->completion_lock.
59258+ */
59259+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
59260+ unsigned tail)
59261+{
59262+ unsigned events_in_ring, completed;
59263+
59264+ /* Clamp head since userland can write to it. */
59265+ head %= ctx->nr_events;
59266+ if (head <= tail)
59267+ events_in_ring = tail - head;
59268+ else
59269+ events_in_ring = ctx->nr_events - (head - tail);
59270+
59271+ completed = ctx->completed_events;
59272+ if (events_in_ring < completed)
59273+ completed -= events_in_ring;
59274+ else
59275+ completed = 0;
59276+
59277+ if (!completed)
59278+ return;
59279+
59280+ ctx->completed_events -= completed;
59281+ put_reqs_available(ctx, completed);
59282+}
59283+
59284+/* user_refill_reqs_available
59285+ * Called to refill reqs_available when aio_get_req() encounters an
59286+ * out-of-space condition in the completion ring.
59287+ */
59288+static void user_refill_reqs_available(struct kioctx *ctx)
59289+{
59290+ spin_lock_irq(&ctx->completion_lock);
59291+ if (ctx->completed_events) {
59292+ struct aio_ring *ring;
59293+ unsigned head;
59294+
59295+ /* Access of ring->head may race with aio_read_events_ring()
59296+	 * here, but that's okay: whether we read the old version
59297+	 * or the new version, either will be valid. The important
59298+ * part is that head cannot pass tail since we prevent
59299+ * aio_complete() from updating tail by holding
59300+ * ctx->completion_lock. Even if head is invalid, the check
59301+ * against ctx->completed_events below will make sure we do the
59302+ * safe/right thing.
59303+ */
59304+ ring = kmap_atomic(ctx->ring_pages[0]);
59305+ head = ring->head;
59306+ kunmap_atomic(ring);
59307+
59308+ refill_reqs_available(ctx, head, ctx->tail);
59309+ }
59310+
59311+ spin_unlock_irq(&ctx->completion_lock);
59312+}
59313+
59314 /* aio_get_req
59315 * Allocate a slot for an aio request.
59316 * Returns NULL if no requests are free.
59317@@ -888,8 +951,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
59318 {
59319 struct kiocb *req;
59320
59321- if (!get_reqs_available(ctx))
59322- return NULL;
59323+ if (!get_reqs_available(ctx)) {
59324+ user_refill_reqs_available(ctx);
59325+ if (!get_reqs_available(ctx))
59326+ return NULL;
59327+ }
59328
59329 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
59330 if (unlikely(!req))
59331@@ -948,8 +1014,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59332 struct kioctx *ctx = iocb->ki_ctx;
59333 struct aio_ring *ring;
59334 struct io_event *ev_page, *event;
59335+ unsigned tail, pos, head;
59336 unsigned long flags;
59337- unsigned tail, pos;
59338
59339 /*
59340 * Special case handling for sync iocbs:
59341@@ -1010,10 +1076,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59342 ctx->tail = tail;
59343
59344 ring = kmap_atomic(ctx->ring_pages[0]);
59345+ head = ring->head;
59346 ring->tail = tail;
59347 kunmap_atomic(ring);
59348 flush_dcache_page(ctx->ring_pages[0]);
59349
59350+ ctx->completed_events++;
59351+ if (ctx->completed_events > 1)
59352+ refill_reqs_available(ctx, head, tail);
59353 spin_unlock_irqrestore(&ctx->completion_lock, flags);
59354
59355 pr_debug("added to ring %p at [%u]\n", iocb, tail);
59356@@ -1028,7 +1098,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59357
59358 /* everything turned out well, dispose of the aiocb. */
59359 kiocb_free(iocb);
59360- put_reqs_available(ctx, 1);
59361
59362 /*
59363 * We have to order our ring_info tail store above and test
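The accounting added to fs/aio.c above hinges on computing how many events remain in the ring when both indices wrap, with head clamped first because userland maps the ring page and can scribble on ring->head. A standalone restatement with a worked example: a 128-slot ring with head 120 and tail 8 holds 128 - (120 - 8) = 16 events.

    /* Sketch: occupancy of the circular completion ring, restating the
     * arithmetic in refill_reqs_available() above. */
    static unsigned ring_events_in_flight(unsigned head, unsigned tail,
                                          unsigned nr_events)
    {
            head %= nr_events;              /* distrust the userland-visible head */
            if (head <= tail)
                    return tail - head;     /* no wrap: head 8, tail 24 -> 16 */
            return nr_events - (head - tail); /* wrap: head 120, tail 8 -> 16 */
    }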
59364diff --git a/fs/attr.c b/fs/attr.c
59365index 6530ced..4a827e2 100644
59366--- a/fs/attr.c
59367+++ b/fs/attr.c
59368@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
59369 unsigned long limit;
59370
59371 limit = rlimit(RLIMIT_FSIZE);
59372+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
59373 if (limit != RLIM_INFINITY && offset > limit)
59374 goto out_sig;
59375 if (offset > inode->i_sb->s_maxbytes)
59376diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
59377index 116fd38..c04182da 100644
59378--- a/fs/autofs4/waitq.c
59379+++ b/fs/autofs4/waitq.c
59380@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
59381 {
59382 unsigned long sigpipe, flags;
59383 mm_segment_t fs;
59384- const char *data = (const char *)addr;
59385+ const char __user *data = (const char __force_user *)addr;
59386 ssize_t wr = 0;
59387
59388 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
59389@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
59390 return 1;
59391 }
59392
59393+#ifdef CONFIG_GRKERNSEC_HIDESYM
59394+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
59395+#endif
59396+
59397 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59398 enum autofs_notify notify)
59399 {
59400@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59401
59402 /* If this is a direct mount request create a dummy name */
59403 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
59404+#ifdef CONFIG_GRKERNSEC_HIDESYM
59405+ /* this name does get written to userland via autofs4_write() */
59406+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
59407+#else
59408 qstr.len = sprintf(name, "%p", dentry);
59409+#endif
59410 else {
59411 qstr.len = autofs4_getpath(sbi, dentry, &name);
59412 if (!qstr.len) {
59413diff --git a/fs/befs/endian.h b/fs/befs/endian.h
59414index 2722387..56059b5 100644
59415--- a/fs/befs/endian.h
59416+++ b/fs/befs/endian.h
59417@@ -11,7 +11,7 @@
59418
59419 #include <asm/byteorder.h>
59420
59421-static inline u64
59422+static inline u64 __intentional_overflow(-1)
59423 fs64_to_cpu(const struct super_block *sb, fs64 n)
59424 {
59425 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59426@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
59427 return (__force fs64)cpu_to_be64(n);
59428 }
59429
59430-static inline u32
59431+static inline u32 __intentional_overflow(-1)
59432 fs32_to_cpu(const struct super_block *sb, fs32 n)
59433 {
59434 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59435@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
59436 return (__force fs32)cpu_to_be32(n);
59437 }
59438
59439-static inline u16
59440+static inline u16 __intentional_overflow(-1)
59441 fs16_to_cpu(const struct super_block *sb, fs16 n)
59442 {
59443 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
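The __intentional_overflow(-1) markers above feed the size_overflow GCC plugin shipped with this patch: they exempt functions whose arithmetic (here, endian conversion of on-disk values) is expected to wrap from the plugin's inserted overflow checks. A simplified sketch; the empty fallback #define is an assumption so the example builds without the plugin, and the real definition lives in the patch's compiler headers:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #ifndef __intentional_overflow
    #define __intentional_overflow(...)     /* no plugin: marker vanishes */
    #endif

    /* Sketch: a conversion helper whose wrapping is by design, so the
     * size_overflow plugin must not instrument it. */
    static inline u32 __intentional_overflow(-1)
    example_fs32_to_cpu(u32 n)
    {
            return le32_to_cpu((__force __le32)n);
    }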
59444diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
59445index ca0ba15..0fa3257 100644
59446--- a/fs/binfmt_aout.c
59447+++ b/fs/binfmt_aout.c
59448@@ -16,6 +16,7 @@
59449 #include <linux/string.h>
59450 #include <linux/fs.h>
59451 #include <linux/file.h>
59452+#include <linux/security.h>
59453 #include <linux/stat.h>
59454 #include <linux/fcntl.h>
59455 #include <linux/ptrace.h>
59456@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
59457 #endif
59458 # define START_STACK(u) ((void __user *)u.start_stack)
59459
59460+ memset(&dump, 0, sizeof(dump));
59461+
59462 fs = get_fs();
59463 set_fs(KERNEL_DS);
59464 has_dumped = 1;
59465@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
59466
59467 /* If the size of the dump file exceeds the rlimit, then see what would happen
59468 if we wrote the stack, but not the data area. */
59469+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
59470 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
59471 dump.u_dsize = 0;
59472
59473 /* Make sure we have enough room to write the stack and data areas. */
59474+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
59475 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
59476 dump.u_ssize = 0;
59477
59478@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
59479 rlim = rlimit(RLIMIT_DATA);
59480 if (rlim >= RLIM_INFINITY)
59481 rlim = ~0;
59482+
59483+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
59484 if (ex.a_data + ex.a_bss > rlim)
59485 return -ENOMEM;
59486
59487@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
59488
59489 install_exec_creds(bprm);
59490
59491+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59492+ current->mm->pax_flags = 0UL;
59493+#endif
59494+
59495+#ifdef CONFIG_PAX_PAGEEXEC
59496+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
59497+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
59498+
59499+#ifdef CONFIG_PAX_EMUTRAMP
59500+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
59501+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
59502+#endif
59503+
59504+#ifdef CONFIG_PAX_MPROTECT
59505+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
59506+ current->mm->pax_flags |= MF_PAX_MPROTECT;
59507+#endif
59508+
59509+ }
59510+#endif
59511+
59512 if (N_MAGIC(ex) == OMAGIC) {
59513 unsigned long text_addr, map_size;
59514 loff_t pos;
59515@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
59516 }
59517
59518 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
59519- PROT_READ | PROT_WRITE | PROT_EXEC,
59520+ PROT_READ | PROT_WRITE,
59521 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
59522 fd_offset + ex.a_text);
59523 if (error != N_DATADDR(ex)) {
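gr_learn_resource() is grsecurity's learning hook: it is invoked immediately before each rlimit comparison so that learning mode can record the peak value a program actually requested, which is how tight RLIMIT_* policies get derived. The call pattern added to binfmt_aout.c above always looks the same; a sketch follows, noting that gr_learn_resource() is provided by the grsecurity core and compiles to a no-op stub when GRKERNSEC is off:

    #include <linux/errno.h>
    #include <linux/resource.h>
    #include <linux/sched.h>

    /* Sketch: observe-then-check, as in load_aout_binary() above. The
     * hook sees the requested size even when the check then fails. */
    static int check_data_limit(unsigned long requested)
    {
            unsigned long rlim = rlimit(RLIMIT_DATA);

            gr_learn_resource(current, RLIMIT_DATA, requested, 1);
            if (rlim != RLIM_INFINITY && requested > rlim)
                    return -ENOMEM;
            return 0;
    }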
59524diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
59525index 3892c1a..4e27c04 100644
59526--- a/fs/binfmt_elf.c
59527+++ b/fs/binfmt_elf.c
59528@@ -34,6 +34,7 @@
59529 #include <linux/utsname.h>
59530 #include <linux/coredump.h>
59531 #include <linux/sched.h>
59532+#include <linux/xattr.h>
59533 #include <asm/uaccess.h>
59534 #include <asm/param.h>
59535 #include <asm/page.h>
59536@@ -47,7 +48,7 @@
59537
59538 static int load_elf_binary(struct linux_binprm *bprm);
59539 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
59540- int, int, unsigned long);
59541+ int, int, unsigned long) __intentional_overflow(-1);
59542
59543 #ifdef CONFIG_USELIB
59544 static int load_elf_library(struct file *);
59545@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
59546 #define elf_core_dump NULL
59547 #endif
59548
59549+#ifdef CONFIG_PAX_MPROTECT
59550+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
59551+#endif
59552+
59553+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59554+static void elf_handle_mmap(struct file *file);
59555+#endif
59556+
59557 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
59558 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
59559 #else
59560@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
59561 .load_binary = load_elf_binary,
59562 .load_shlib = load_elf_library,
59563 .core_dump = elf_core_dump,
59564+
59565+#ifdef CONFIG_PAX_MPROTECT
59566+ .handle_mprotect= elf_handle_mprotect,
59567+#endif
59568+
59569+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59570+ .handle_mmap = elf_handle_mmap,
59571+#endif
59572+
59573 .min_coredump = ELF_EXEC_PAGESIZE,
59574 };
59575
59576@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
59577
59578 static int set_brk(unsigned long start, unsigned long end)
59579 {
59580+ unsigned long e = end;
59581+
59582 start = ELF_PAGEALIGN(start);
59583 end = ELF_PAGEALIGN(end);
59584 if (end > start) {
59585@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
59586 if (BAD_ADDR(addr))
59587 return addr;
59588 }
59589- current->mm->start_brk = current->mm->brk = end;
59590+ current->mm->start_brk = current->mm->brk = e;
59591 return 0;
59592 }
59593
59594@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59595 elf_addr_t __user *u_rand_bytes;
59596 const char *k_platform = ELF_PLATFORM;
59597 const char *k_base_platform = ELF_BASE_PLATFORM;
59598- unsigned char k_rand_bytes[16];
59599+ u32 k_rand_bytes[4];
59600 int items;
59601 elf_addr_t *elf_info;
59602 int ei_index = 0;
59603 const struct cred *cred = current_cred();
59604 struct vm_area_struct *vma;
59605+ unsigned long saved_auxv[AT_VECTOR_SIZE];
59606
59607 /*
59608 * In some cases (e.g. Hyper-Threading), we want to avoid L1
59609@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59610 * Generate 16 random bytes for userspace PRNG seeding.
59611 */
59612 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
59613- u_rand_bytes = (elf_addr_t __user *)
59614- STACK_ALLOC(p, sizeof(k_rand_bytes));
59615+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
59616+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
59617+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
59618+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
59619+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
59620+ u_rand_bytes = (elf_addr_t __user *) p;
59621 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
59622 return -EFAULT;
59623
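Two things change in the AT_RANDOM handling above: the 16 seed bytes are generated as four u32 words and each word is folded back into the kernel's prandom state, so the copy handed to userspace reveals nothing about later prandom output, and the bytes land at an explicitly rounded stack position rather than via STACK_ALLOC. A compressed restatement of the reseeding half, using the 3.16-era prandom API:

    #include <linux/random.h>

    /* Sketch: hand random bytes to userspace while perturbing the
     * kernel PRNG with the same material, as the hunk above does. */
    static void fill_at_random(u32 out[4])
    {
            int i;

            get_random_bytes(out, 4 * sizeof(u32));
            for (i = 0; i < 4; i++)
                    prandom_seed(out[i] ^ prandom_u32());
    }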
59624@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59625 return -EFAULT;
59626 current->mm->env_end = p;
59627
59628+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
59629+
59630 /* Put the elf_info on the stack in the right place. */
59631 sp = (elf_addr_t __user *)envp + 1;
59632- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
59633+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
59634 return -EFAULT;
59635 return 0;
59636 }
59637@@ -393,15 +420,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
59638 an ELF header */
59639
59640 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59641- struct file *interpreter, unsigned long *interp_map_addr,
59642- unsigned long no_base)
59643+ struct file *interpreter, unsigned long no_base)
59644 {
59645 struct elf_phdr *elf_phdata;
59646 struct elf_phdr *eppnt;
59647- unsigned long load_addr = 0;
59648+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
59649 int load_addr_set = 0;
59650 unsigned long last_bss = 0, elf_bss = 0;
59651- unsigned long error = ~0UL;
59652+ unsigned long error = -EINVAL;
59653 unsigned long total_size;
59654 int retval, i, size;
59655
59656@@ -447,6 +473,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59657 goto out_close;
59658 }
59659
59660+#ifdef CONFIG_PAX_SEGMEXEC
59661+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
59662+ pax_task_size = SEGMEXEC_TASK_SIZE;
59663+#endif
59664+
59665 eppnt = elf_phdata;
59666 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
59667 if (eppnt->p_type == PT_LOAD) {
59668@@ -470,8 +501,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59669 map_addr = elf_map(interpreter, load_addr + vaddr,
59670 eppnt, elf_prot, elf_type, total_size);
59671 total_size = 0;
59672- if (!*interp_map_addr)
59673- *interp_map_addr = map_addr;
59674 error = map_addr;
59675 if (BAD_ADDR(map_addr))
59676 goto out_close;
59677@@ -490,8 +519,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59678 k = load_addr + eppnt->p_vaddr;
59679 if (BAD_ADDR(k) ||
59680 eppnt->p_filesz > eppnt->p_memsz ||
59681- eppnt->p_memsz > TASK_SIZE ||
59682- TASK_SIZE - eppnt->p_memsz < k) {
59683+ eppnt->p_memsz > pax_task_size ||
59684+ pax_task_size - eppnt->p_memsz < k) {
59685 error = -ENOMEM;
59686 goto out_close;
59687 }
59688@@ -530,9 +559,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59689 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
59690
59691 /* Map the last of the bss segment */
59692- error = vm_brk(elf_bss, last_bss - elf_bss);
59693- if (BAD_ADDR(error))
59694- goto out_close;
59695+ if (last_bss > elf_bss) {
59696+ error = vm_brk(elf_bss, last_bss - elf_bss);
59697+ if (BAD_ADDR(error))
59698+ goto out_close;
59699+ }
59700 }
59701
59702 error = load_addr;
59703@@ -543,6 +574,336 @@ out:
59704 return error;
59705 }
59706
59707+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59708+#ifdef CONFIG_PAX_SOFTMODE
59709+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
59710+{
59711+ unsigned long pax_flags = 0UL;
59712+
59713+#ifdef CONFIG_PAX_PAGEEXEC
59714+ if (elf_phdata->p_flags & PF_PAGEEXEC)
59715+ pax_flags |= MF_PAX_PAGEEXEC;
59716+#endif
59717+
59718+#ifdef CONFIG_PAX_SEGMEXEC
59719+ if (elf_phdata->p_flags & PF_SEGMEXEC)
59720+ pax_flags |= MF_PAX_SEGMEXEC;
59721+#endif
59722+
59723+#ifdef CONFIG_PAX_EMUTRAMP
59724+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59725+ pax_flags |= MF_PAX_EMUTRAMP;
59726+#endif
59727+
59728+#ifdef CONFIG_PAX_MPROTECT
59729+ if (elf_phdata->p_flags & PF_MPROTECT)
59730+ pax_flags |= MF_PAX_MPROTECT;
59731+#endif
59732+
59733+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59734+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
59735+ pax_flags |= MF_PAX_RANDMMAP;
59736+#endif
59737+
59738+ return pax_flags;
59739+}
59740+#endif
59741+
59742+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
59743+{
59744+ unsigned long pax_flags = 0UL;
59745+
59746+#ifdef CONFIG_PAX_PAGEEXEC
59747+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
59748+ pax_flags |= MF_PAX_PAGEEXEC;
59749+#endif
59750+
59751+#ifdef CONFIG_PAX_SEGMEXEC
59752+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
59753+ pax_flags |= MF_PAX_SEGMEXEC;
59754+#endif
59755+
59756+#ifdef CONFIG_PAX_EMUTRAMP
59757+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
59758+ pax_flags |= MF_PAX_EMUTRAMP;
59759+#endif
59760+
59761+#ifdef CONFIG_PAX_MPROTECT
59762+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
59763+ pax_flags |= MF_PAX_MPROTECT;
59764+#endif
59765+
59766+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59767+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
59768+ pax_flags |= MF_PAX_RANDMMAP;
59769+#endif
59770+
59771+ return pax_flags;
59772+}
59773+#endif
59774+
59775+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59776+#ifdef CONFIG_PAX_SOFTMODE
59777+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
59778+{
59779+ unsigned long pax_flags = 0UL;
59780+
59781+#ifdef CONFIG_PAX_PAGEEXEC
59782+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
59783+ pax_flags |= MF_PAX_PAGEEXEC;
59784+#endif
59785+
59786+#ifdef CONFIG_PAX_SEGMEXEC
59787+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
59788+ pax_flags |= MF_PAX_SEGMEXEC;
59789+#endif
59790+
59791+#ifdef CONFIG_PAX_EMUTRAMP
59792+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59793+ pax_flags |= MF_PAX_EMUTRAMP;
59794+#endif
59795+
59796+#ifdef CONFIG_PAX_MPROTECT
59797+ if (pax_flags_softmode & MF_PAX_MPROTECT)
59798+ pax_flags |= MF_PAX_MPROTECT;
59799+#endif
59800+
59801+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59802+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
59803+ pax_flags |= MF_PAX_RANDMMAP;
59804+#endif
59805+
59806+ return pax_flags;
59807+}
59808+#endif
59809+
59810+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
59811+{
59812+ unsigned long pax_flags = 0UL;
59813+
59814+#ifdef CONFIG_PAX_PAGEEXEC
59815+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
59816+ pax_flags |= MF_PAX_PAGEEXEC;
59817+#endif
59818+
59819+#ifdef CONFIG_PAX_SEGMEXEC
59820+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
59821+ pax_flags |= MF_PAX_SEGMEXEC;
59822+#endif
59823+
59824+#ifdef CONFIG_PAX_EMUTRAMP
59825+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
59826+ pax_flags |= MF_PAX_EMUTRAMP;
59827+#endif
59828+
59829+#ifdef CONFIG_PAX_MPROTECT
59830+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
59831+ pax_flags |= MF_PAX_MPROTECT;
59832+#endif
59833+
59834+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59835+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
59836+ pax_flags |= MF_PAX_RANDMMAP;
59837+#endif
59838+
59839+ return pax_flags;
59840+}
59841+#endif
59842+
59843+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59844+static unsigned long pax_parse_defaults(void)
59845+{
59846+ unsigned long pax_flags = 0UL;
59847+
59848+#ifdef CONFIG_PAX_SOFTMODE
59849+ if (pax_softmode)
59850+ return pax_flags;
59851+#endif
59852+
59853+#ifdef CONFIG_PAX_PAGEEXEC
59854+ pax_flags |= MF_PAX_PAGEEXEC;
59855+#endif
59856+
59857+#ifdef CONFIG_PAX_SEGMEXEC
59858+ pax_flags |= MF_PAX_SEGMEXEC;
59859+#endif
59860+
59861+#ifdef CONFIG_PAX_MPROTECT
59862+ pax_flags |= MF_PAX_MPROTECT;
59863+#endif
59864+
59865+#ifdef CONFIG_PAX_RANDMMAP
59866+ if (randomize_va_space)
59867+ pax_flags |= MF_PAX_RANDMMAP;
59868+#endif
59869+
59870+ return pax_flags;
59871+}
59872+
59873+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
59874+{
59875+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
59876+
59877+#ifdef CONFIG_PAX_EI_PAX
59878+
59879+#ifdef CONFIG_PAX_SOFTMODE
59880+ if (pax_softmode)
59881+ return pax_flags;
59882+#endif
59883+
59884+ pax_flags = 0UL;
59885+
59886+#ifdef CONFIG_PAX_PAGEEXEC
59887+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
59888+ pax_flags |= MF_PAX_PAGEEXEC;
59889+#endif
59890+
59891+#ifdef CONFIG_PAX_SEGMEXEC
59892+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
59893+ pax_flags |= MF_PAX_SEGMEXEC;
59894+#endif
59895+
59896+#ifdef CONFIG_PAX_EMUTRAMP
59897+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
59898+ pax_flags |= MF_PAX_EMUTRAMP;
59899+#endif
59900+
59901+#ifdef CONFIG_PAX_MPROTECT
59902+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
59903+ pax_flags |= MF_PAX_MPROTECT;
59904+#endif
59905+
59906+#ifdef CONFIG_PAX_ASLR
59907+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
59908+ pax_flags |= MF_PAX_RANDMMAP;
59909+#endif
59910+
59911+#endif
59912+
59913+ return pax_flags;
59914+
59915+}
59916+
59917+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
59918+{
59919+
59920+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59921+ unsigned long i;
59922+
59923+ for (i = 0UL; i < elf_ex->e_phnum; i++)
59924+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
59925+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
59926+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
59927+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
59928+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
59929+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59930+ return PAX_PARSE_FLAGS_FALLBACK;
59931+
59932+#ifdef CONFIG_PAX_SOFTMODE
59933+ if (pax_softmode)
59934+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59935+ else
59936+#endif
59937+
59938+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59939+ break;
59940+ }
59941+#endif
59942+
59943+ return PAX_PARSE_FLAGS_FALLBACK;
59944+}
59945+
59946+static unsigned long pax_parse_xattr_pax(struct file * const file)
59947+{
59948+
59949+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59950+ ssize_t xattr_size, i;
59951+ unsigned char xattr_value[sizeof("pemrs") - 1];
59952+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59953+
59954+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59955+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59956+ return PAX_PARSE_FLAGS_FALLBACK;
59957+
59958+ for (i = 0; i < xattr_size; i++)
59959+ switch (xattr_value[i]) {
59960+ default:
59961+ return PAX_PARSE_FLAGS_FALLBACK;
59962+
59963+#define parse_flag(option1, option2, flag) \
59964+ case option1: \
59965+ if (pax_flags_hardmode & MF_PAX_##flag) \
59966+ return PAX_PARSE_FLAGS_FALLBACK;\
59967+ pax_flags_hardmode |= MF_PAX_##flag; \
59968+ break; \
59969+ case option2: \
59970+ if (pax_flags_softmode & MF_PAX_##flag) \
59971+ return PAX_PARSE_FLAGS_FALLBACK;\
59972+ pax_flags_softmode |= MF_PAX_##flag; \
59973+ break;
59974+
59975+ parse_flag('p', 'P', PAGEEXEC);
59976+ parse_flag('e', 'E', EMUTRAMP);
59977+ parse_flag('m', 'M', MPROTECT);
59978+ parse_flag('r', 'R', RANDMMAP);
59979+ parse_flag('s', 'S', SEGMEXEC);
59980+
59981+#undef parse_flag
59982+ }
59983+
59984+ if (pax_flags_hardmode & pax_flags_softmode)
59985+ return PAX_PARSE_FLAGS_FALLBACK;
59986+
59987+#ifdef CONFIG_PAX_SOFTMODE
59988+ if (pax_softmode)
59989+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59990+ else
59991+#endif
59992+
59993+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59994+#else
59995+ return PAX_PARSE_FLAGS_FALLBACK;
59996+#endif
59997+
59998+}
59999+
60000+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
60001+{
60002+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
60003+
60004+ pax_flags = pax_parse_defaults();
60005+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
60006+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
60007+ xattr_pax_flags = pax_parse_xattr_pax(file);
60008+
60009+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
60010+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
60011+ pt_pax_flags != xattr_pax_flags)
60012+ return -EINVAL;
60013+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60014+ pax_flags = xattr_pax_flags;
60015+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60016+ pax_flags = pt_pax_flags;
60017+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60018+ pax_flags = ei_pax_flags;
60019+
60020+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
60021+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60022+ if ((__supported_pte_mask & _PAGE_NX))
60023+ pax_flags &= ~MF_PAX_SEGMEXEC;
60024+ else
60025+ pax_flags &= ~MF_PAX_PAGEEXEC;
60026+ }
60027+#endif
60028+
60029+ if (0 > pax_check_flags(&pax_flags))
60030+ return -EINVAL;
60031+
60032+ current->mm->pax_flags = pax_flags;
60033+ return 0;
60034+}
60035+#endif
60036+
60037 /*
60038 * These are the functions used to load ELF style executables and shared
60039 * libraries. There is no binary dependent code anywhere else.
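pax_parse_pax_flags() above merges four flag sources with a fixed precedence: compiled-in defaults are overridden by the legacy EI_PAX header bits, those by a PT_PAX_FLAGS program header, and that in turn by the extended attribute, while a disagreement between the PT_PAX and xattr markings aborts the exec. The decision logic reduces to the following sketch, where PAX_PARSE_FLAGS_FALLBACK (a patch-defined sentinel) means "this source was absent":

    #include <linux/errno.h>

    /* Sketch: source-precedence resolution as implemented above. Each
     * argument is either a flag word or PAX_PARSE_FLAGS_FALLBACK. */
    static long resolve_pax_flags(unsigned long defaults, unsigned long ei,
                                  unsigned long pt, unsigned long xattr,
                                  unsigned long *out)
    {
            if (pt != PAX_PARSE_FLAGS_FALLBACK &&
                xattr != PAX_PARSE_FLAGS_FALLBACK && pt != xattr)
                    return -EINVAL;         /* conflicting explicit markings */

            if (xattr != PAX_PARSE_FLAGS_FALLBACK)
                    *out = xattr;           /* strongest source */
            else if (pt != PAX_PARSE_FLAGS_FALLBACK)
                    *out = pt;
            else if (ei != PAX_PARSE_FLAGS_FALLBACK)
                    *out = ei;
            else
                    *out = defaults;        /* nothing marked: kernel policy */
            return 0;
    }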
60040@@ -556,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
60041 {
60042 unsigned int random_variable = 0;
60043
60044+#ifdef CONFIG_PAX_RANDUSTACK
60045+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
60046+ return stack_top - current->mm->delta_stack;
60047+#endif
60048+
60049 if ((current->flags & PF_RANDOMIZE) &&
60050 !(current->personality & ADDR_NO_RANDOMIZE)) {
60051 random_variable = get_random_int() & STACK_RND_MASK;
60052@@ -574,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60053 unsigned long load_addr = 0, load_bias = 0;
60054 int load_addr_set = 0;
60055 char * elf_interpreter = NULL;
60056- unsigned long error;
60057+ unsigned long error = 0;
60058 struct elf_phdr *elf_ppnt, *elf_phdata;
60059 unsigned long elf_bss, elf_brk;
60060 int retval, i;
60061@@ -589,6 +955,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60062 struct elfhdr elf_ex;
60063 struct elfhdr interp_elf_ex;
60064 } *loc;
60065+ unsigned long pax_task_size;
60066
60067 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
60068 if (!loc) {
60069@@ -726,6 +1093,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
60070 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
60071 may depend on the personality. */
60072 SET_PERSONALITY(loc->elf_ex);
60073+
60074+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60075+ current->mm->pax_flags = 0UL;
60076+#endif
60077+
60078+#ifdef CONFIG_PAX_DLRESOLVE
60079+ current->mm->call_dl_resolve = 0UL;
60080+#endif
60081+
60082+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60083+ current->mm->call_syscall = 0UL;
60084+#endif
60085+
60086+#ifdef CONFIG_PAX_ASLR
60087+ current->mm->delta_mmap = 0UL;
60088+ current->mm->delta_stack = 0UL;
60089+#endif
60090+
60091+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60092+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
60093+ send_sig(SIGKILL, current, 0);
60094+ goto out_free_dentry;
60095+ }
60096+#endif
60097+
60098+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60099+ pax_set_initial_flags(bprm);
60100+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60101+ if (pax_set_initial_flags_func)
60102+ (pax_set_initial_flags_func)(bprm);
60103+#endif
60104+
60105+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60106+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
60107+ current->mm->context.user_cs_limit = PAGE_SIZE;
60108+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
60109+ }
60110+#endif
60111+
60112+#ifdef CONFIG_PAX_SEGMEXEC
60113+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60114+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
60115+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
60116+ pax_task_size = SEGMEXEC_TASK_SIZE;
60117+ current->mm->def_flags |= VM_NOHUGEPAGE;
60118+ } else
60119+#endif
60120+
60121+ pax_task_size = TASK_SIZE;
60122+
60123+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
60124+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60125+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
60126+ put_cpu();
60127+ }
60128+#endif
60129+
60130+#ifdef CONFIG_PAX_ASLR
60131+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60132+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
60133+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
60134+ }
60135+#endif
60136+
60137+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60138+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60139+ executable_stack = EXSTACK_DISABLE_X;
60140+ current->personality &= ~READ_IMPLIES_EXEC;
60141+ } else
60142+#endif
60143+
60144 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
60145 current->personality |= READ_IMPLIES_EXEC;
60146
60147@@ -815,6 +1253,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
60148 #else
60149 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
60150 #endif
60151+
60152+#ifdef CONFIG_PAX_RANDMMAP
60153+ /* PaX: randomize base address at the default exe base if requested */
60154+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
60155+#ifdef CONFIG_SPARC64
60156+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
60157+#else
60158+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
60159+#endif
60160+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
60161+ elf_flags |= MAP_FIXED;
60162+ }
60163+#endif
60164+
60165 }
60166
60167 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
60168@@ -847,9 +1299,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
60169 * allowed task size. Note that p_filesz must always be
60170 * <= p_memsz so it is only necessary to check p_memsz.
60171 */
60172- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60173- elf_ppnt->p_memsz > TASK_SIZE ||
60174- TASK_SIZE - elf_ppnt->p_memsz < k) {
60175+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60176+ elf_ppnt->p_memsz > pax_task_size ||
60177+ pax_task_size - elf_ppnt->p_memsz < k) {
60178 /* set_brk can never work. Avoid overflows. */
60179 send_sig(SIGKILL, current, 0);
60180 retval = -EINVAL;
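The rewritten condition above is the overflow-safe form of "k + p_memsz must fit below pax_task_size" (which is halved under SEGMEXEC): computing the sum directly could wrap, so each operand is bounded against the limit first, after which `pax_task_size - p_memsz < k` cannot underflow. A self-contained sketch of the idiom — the helper name is ours, not the kernel's:

```c
#include <assert.h>
#include <stdbool.h>
#include <limits.h>

/* true iff [k, k + memsz) fits entirely below limit, without ever
 * computing k + memsz (which could wrap around) */
static bool segment_fits(unsigned long k, unsigned long memsz,
			 unsigned long limit)
{
	if (k >= limit)
		return false;
	if (memsz > limit)
		return false;
	/* safe: limit - memsz cannot underflow after the check above */
	return limit - memsz >= k;
}

int main(void)
{
	assert(segment_fits(0x1000, 0x1000, 0x10000));
	/* k + memsz would wrap to a small value here; a naive
	 * "k + memsz <= limit" test would wrongly pass */
	assert(!segment_fits(ULONG_MAX - 0xfff, 0x2000, 0x10000));
	return 0;
}
```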
60181@@ -888,17 +1340,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
60182 goto out_free_dentry;
60183 }
60184 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
60185- send_sig(SIGSEGV, current, 0);
60186- retval = -EFAULT; /* Nobody gets to see this, but.. */
60187- goto out_free_dentry;
60188+ /*
60189+ * This bss-zeroing can fail if the ELF
60190+ * file specifies odd protections. So
60191+ * we don't check the return value
60192+ */
60193 }
60194
60195+#ifdef CONFIG_PAX_RANDMMAP
60196+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60197+ unsigned long start, size, flags;
60198+ vm_flags_t vm_flags;
60199+
60200+ start = ELF_PAGEALIGN(elf_brk);
60201+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
60202+ flags = MAP_FIXED | MAP_PRIVATE;
60203+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
60204+
60205+ down_write(&current->mm->mmap_sem);
60206+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
60207+ retval = -ENOMEM;
60208+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
60209+// if (current->personality & ADDR_NO_RANDOMIZE)
60210+// vm_flags |= VM_READ | VM_MAYREAD;
60211+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
60212+ retval = IS_ERR_VALUE(start) ? start : 0;
60213+ }
60214+ up_write(&current->mm->mmap_sem);
60215+ if (retval == 0)
60216+ retval = set_brk(start + size, start + size + PAGE_SIZE);
60217+ if (retval < 0) {
60218+ send_sig(SIGKILL, current, 0);
60219+ goto out_free_dentry;
60220+ }
60221+ }
60222+#endif
60223+
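The RANDMMAP block above inserts a randomly sized, non-expandable dummy mapping (VM_DONTEXPAND | VM_DONTDUMP) just past the loaded image before re-placing the brk, so the heap base is randomized relative to the image. The size works out to PAGE_SIZE plus up to (2^22 - 1) * 16 bytes, i.e. roughly 64 MiB in 16-byte steps. A userspace model of the size computation only, with an illustrative PAGE_SIZE and a stubbed RNG:

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* stub for pax_get_random_long(); demonstration only */
static unsigned long get_random(void)
{
	return ((unsigned long)rand() << 16) ^ (unsigned long)rand();
}

int main(void)
{
	unsigned long size = PAGE_SIZE +
		((get_random() & ((1UL << 22) - 1UL)) << 4);
	printf("gap size = %lu bytes (max %lu)\n", size,
	       PAGE_SIZE + (((1UL << 22) - 1UL) << 4));
	return 0;
}
```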
60224 if (elf_interpreter) {
60225- unsigned long interp_map_addr = 0;
60226-
60227 elf_entry = load_elf_interp(&loc->interp_elf_ex,
60228 interpreter,
60229- &interp_map_addr,
60230 load_bias);
60231 if (!IS_ERR((void *)elf_entry)) {
60232 /*
60233@@ -1130,7 +1610,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
60234 * Decide what to dump of a segment, part, all or none.
60235 */
60236 static unsigned long vma_dump_size(struct vm_area_struct *vma,
60237- unsigned long mm_flags)
60238+ unsigned long mm_flags, long signr)
60239 {
60240 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
60241
60242@@ -1168,7 +1648,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
60243 if (vma->vm_file == NULL)
60244 return 0;
60245
60246- if (FILTER(MAPPED_PRIVATE))
60247+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
60248 goto whole;
60249
60250 /*
60251@@ -1375,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
60252 {
60253 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
60254 int i = 0;
60255- do
60256+ do {
60257 i += 2;
60258- while (auxv[i - 2] != AT_NULL);
60259+ } while (auxv[i - 2] != AT_NULL);
60260 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
60261 }
60262
60263@@ -1386,7 +1866,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
60264 {
60265 mm_segment_t old_fs = get_fs();
60266 set_fs(KERNEL_DS);
60267- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
60268+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
60269 set_fs(old_fs);
60270 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
60271 }
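`__force_user` is a grsecurity sparse annotation: the surrounding set_fs(KERNEL_DS) window lets copy_siginfo_to_user() write into a kernel buffer through a `__user`-typed parameter, and the cast documents that the address-space override is deliberate rather than a missed check. A compilable sketch of the annotation machinery, assuming the usual sparse macro definitions (under a normal compiler the attributes vanish):

```c
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

#include <string.h>

/* model only: the real copy_to_user() validates the user pointer */
static void copy_to_user_model(void __user *dst, const void *src,
			       unsigned long n)
{
	memcpy((void __force *)dst, src, n);
}

int main(void)
{
	char kbuf[16];
	const char msg[] = "siginfo";

	/* a kernel buffer passed where a user pointer is expected; the
	 * __force_user cast tells sparse the mismatch is intentional,
	 * mirroring the set_fs(KERNEL_DS) window in the patch */
	copy_to_user_model((char __force_user *)kbuf, msg, sizeof(msg));
	return 0;
}
```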
60272@@ -2010,14 +2490,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
60273 }
60274
60275 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
60276- unsigned long mm_flags)
60277+ struct coredump_params *cprm)
60278 {
60279 struct vm_area_struct *vma;
60280 size_t size = 0;
60281
60282 for (vma = first_vma(current, gate_vma); vma != NULL;
60283 vma = next_vma(vma, gate_vma))
60284- size += vma_dump_size(vma, mm_flags);
60285+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60286 return size;
60287 }
60288
60289@@ -2108,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60290
60291 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
60292
60293- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
60294+ offset += elf_core_vma_data_size(gate_vma, cprm);
60295 offset += elf_core_extra_data_size();
60296 e_shoff = offset;
60297
60298@@ -2136,7 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60299 phdr.p_offset = offset;
60300 phdr.p_vaddr = vma->vm_start;
60301 phdr.p_paddr = 0;
60302- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
60303+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60304 phdr.p_memsz = vma->vm_end - vma->vm_start;
60305 offset += phdr.p_filesz;
60306 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
60307@@ -2169,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60308 unsigned long addr;
60309 unsigned long end;
60310
60311- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
60312+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60313
60314 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
60315 struct page *page;
60316@@ -2210,6 +2690,167 @@ out:
60317
60318 #endif /* CONFIG_ELF_CORE */
60319
60320+#ifdef CONFIG_PAX_MPROTECT
60321+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
60322+ * therefore we'll grant them VM_MAYWRITE once during their lifetime. Similarly,
60323+ * we'll remove VM_MAYWRITE for good on RELRO segments.
60324+ *
60325+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
60326+ * basis, because we want to allow the common case and not the special ones.
60327+ */
60328+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
60329+{
60330+ struct elfhdr elf_h;
60331+ struct elf_phdr elf_p;
60332+ unsigned long i;
60333+ unsigned long oldflags;
60334+ bool is_textrel_rw, is_textrel_rx, is_relro;
60335+
60336+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
60337+ return;
60338+
60339+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
60340+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
60341+
60342+#ifdef CONFIG_PAX_ELFRELOCS
60343+ /* possible TEXTREL */
60344+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
60345+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
60346+#else
60347+ is_textrel_rw = false;
60348+ is_textrel_rx = false;
60349+#endif
60350+
60351+ /* possible RELRO */
60352+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
60353+
60354+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
60355+ return;
60356+
60357+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60358+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60359+
60360+#ifdef CONFIG_PAX_ETEXECRELOCS
60361+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60362+#else
60363+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
60364+#endif
60365+
60366+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60367+ !elf_check_arch(&elf_h) ||
60368+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60369+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60370+ return;
60371+
60372+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60373+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60374+ return;
60375+ switch (elf_p.p_type) {
60376+ case PT_DYNAMIC:
60377+ if (!is_textrel_rw && !is_textrel_rx)
60378+ continue;
60379+ i = 0UL;
60380+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
60381+ elf_dyn dyn;
60382+
60383+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
60384+ break;
60385+ if (dyn.d_tag == DT_NULL)
60386+ break;
60387+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
60388+ gr_log_textrel(vma);
60389+ if (is_textrel_rw)
60390+ vma->vm_flags |= VM_MAYWRITE;
60391+ else
60392+					/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
60393+ vma->vm_flags &= ~VM_MAYWRITE;
60394+ break;
60395+ }
60396+ i++;
60397+ }
60398+ is_textrel_rw = false;
60399+ is_textrel_rx = false;
60400+ continue;
60401+
60402+ case PT_GNU_RELRO:
60403+ if (!is_relro)
60404+ continue;
60405+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
60406+ vma->vm_flags &= ~VM_MAYWRITE;
60407+ is_relro = false;
60408+ continue;
60409+
60410+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60411+ case PT_PAX_FLAGS: {
60412+ const char *msg_mprotect = "", *msg_emutramp = "";
60413+ char *buffer_lib, *buffer_exe;
60414+
60415+ if (elf_p.p_flags & PF_NOMPROTECT)
60416+ msg_mprotect = "MPROTECT disabled";
60417+
60418+#ifdef CONFIG_PAX_EMUTRAMP
60419+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
60420+ msg_emutramp = "EMUTRAMP enabled";
60421+#endif
60422+
60423+ if (!msg_mprotect[0] && !msg_emutramp[0])
60424+ continue;
60425+
60426+ if (!printk_ratelimit())
60427+ continue;
60428+
60429+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
60430+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
60431+ if (buffer_lib && buffer_exe) {
60432+ char *path_lib, *path_exe;
60433+
60434+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
60435+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
60436+
60437+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
60438+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
60439+
60440+ }
60441+ free_page((unsigned long)buffer_exe);
60442+ free_page((unsigned long)buffer_lib);
60443+ continue;
60444+ }
60445+#endif
60446+
60447+ }
60448+ }
60449+}
60450+#endif
60451+
60452+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60453+
60454+extern int grsec_enable_log_rwxmaps;
60455+
60456+static void elf_handle_mmap(struct file *file)
60457+{
60458+ struct elfhdr elf_h;
60459+ struct elf_phdr elf_p;
60460+ unsigned long i;
60461+
60462+ if (!grsec_enable_log_rwxmaps)
60463+ return;
60464+
60465+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60466+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60467+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
60468+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60469+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60470+ return;
60471+
60472+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60473+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60474+ return;
60475+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
60476+ gr_log_ptgnustack(file);
60477+ }
60478+}
60479+#endif
60480+
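Both helpers added above follow the same defensive pattern: read the ELF header, validate it (magic, architecture, e_phentsize equal to sizeof(struct elf_phdr), e_phnum bounded to 64 KiB worth of entries), then iterate the program headers one record at a time rather than mapping them wholesale. A userspace analogue using <elf.h>, assuming a 64-bit native binary:

```c
#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/proc/self/exe";
	FILE *f = fopen(path, "rb");
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	unsigned i;

	if (!f || fread(&eh, sizeof(eh), 1, f) != 1)
		return 1;
	/* same sanity checks as the patch: magic, phentsize, bounded phnum */
	if (memcmp(eh.e_ident, ELFMAG, SELFMAG) ||
	    eh.e_phentsize != sizeof(ph) ||
	    eh.e_phnum > 65536UL / sizeof(ph))
		return 1;
	for (i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, (long)(eh.e_phoff + (unsigned long)i * sizeof(ph)),
			  SEEK_SET) ||
		    fread(&ph, sizeof(ph), 1, f) != 1)
			return 1;
		if (ph.p_type == PT_GNU_RELRO)
			printf("RELRO segment at offset %#llx\n",
			       (unsigned long long)ph.p_offset);
		if (ph.p_type == PT_GNU_STACK)
			printf("GNU_STACK flags %#x (%s)\n", ph.p_flags,
			       (ph.p_flags & PF_X) ? "executable" : "non-exec");
	}
	fclose(f);
	return 0;
}
```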
60481 static int __init init_elf_binfmt(void)
60482 {
60483 register_binfmt(&elf_format);
60484diff --git a/fs/block_dev.c b/fs/block_dev.c
60485index 6d72746..536d1db 100644
60486--- a/fs/block_dev.c
60487+++ b/fs/block_dev.c
60488@@ -701,7 +701,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
60489 else if (bdev->bd_contains == bdev)
60490 return true; /* is a whole device which isn't held */
60491
60492- else if (whole->bd_holder == bd_may_claim)
60493+ else if (whole->bd_holder == (void *)bd_may_claim)
60494 return true; /* is a partition of a device that is being partitioned */
60495 else if (whole->bd_holder != NULL)
60496 return false; /* is a partition of a held device */
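The block_dev fix above only adds a cast: bd_may_claim's own address is used elsewhere as a sentinel holder value, and comparing a function pointer against the `void *bd_holder` field needs an explicit conversion. A tiny sketch of the "function address as unique token" idiom (names ours; the function-to-void* cast is not strict ISO C but is well-defined on POSIX targets):

```c
#include <assert.h>

static int claimer(void) { return 0; }

int main(void)
{
	void *holder = (void *)claimer;		/* store the cookie */
	assert(holder == (void *)claimer);	/* later: recognise it */
	return 0;
}
```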
60497diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
60498index aeab453..48dbafc 100644
60499--- a/fs/btrfs/ctree.c
60500+++ b/fs/btrfs/ctree.c
60501@@ -1184,9 +1184,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
60502 free_extent_buffer(buf);
60503 add_root_to_dirty_list(root);
60504 } else {
60505- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
60506- parent_start = parent->start;
60507- else
60508+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
60509+ if (parent)
60510+ parent_start = parent->start;
60511+ else
60512+ parent_start = 0;
60513+ } else
60514 parent_start = 0;
60515
60516 WARN_ON(trans->transid != btrfs_header_generation(parent));
60517diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
60518index da775bf..882da68 100644
60519--- a/fs/btrfs/delayed-inode.c
60520+++ b/fs/btrfs/delayed-inode.c
60521@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
60522
60523 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
60524 {
60525- int seq = atomic_inc_return(&delayed_root->items_seq);
60526+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
60527 if ((atomic_dec_return(&delayed_root->items) <
60528 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
60529 waitqueue_active(&delayed_root->wait))
60530@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
60531
60532 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
60533 {
60534- int val = atomic_read(&delayed_root->items_seq);
60535+ int val = atomic_read_unchecked(&delayed_root->items_seq);
60536
60537 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
60538 return 1;
60539@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
60540 int seq;
60541 int ret;
60542
60543- seq = atomic_read(&delayed_root->items_seq);
60544+ seq = atomic_read_unchecked(&delayed_root->items_seq);
60545
60546 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
60547 if (ret)
60548diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
60549index f70119f..ab5894d 100644
60550--- a/fs/btrfs/delayed-inode.h
60551+++ b/fs/btrfs/delayed-inode.h
60552@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
60553 */
60554 struct list_head prepare_list;
60555 atomic_t items; /* for delayed items */
60556- atomic_t items_seq; /* for delayed items */
60557+ atomic_unchecked_t items_seq; /* for delayed items */
60558 int nodes; /* for delayed nodes */
60559 wait_queue_head_t wait;
60560 };
60561@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
60562 struct btrfs_delayed_root *delayed_root)
60563 {
60564 atomic_set(&delayed_root->items, 0);
60565- atomic_set(&delayed_root->items_seq, 0);
60566+ atomic_set_unchecked(&delayed_root->items_seq, 0);
60567 delayed_root->nodes = 0;
60568 spin_lock_init(&delayed_root->lock);
60569 init_waitqueue_head(&delayed_root->wait);
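atomic_unchecked_t is the grsecurity/PaX counterpart of atomic_t under reference-count overflow protection (PAX_REFCOUNT): checked atomics trap on wraparound, so counters whose wraparound is harmless — like the items_seq sequence number here — are moved to the *_unchecked variants. A userspace model of the split using C11 atomics; the overflow check is a simplified stand-in, not the PaX implementation:

```c
#include <stdatomic.h>
#include <limits.h>
#include <stdlib.h>

typedef struct { atomic_int v; } atomic_checked_t;   /* refcount-style */
typedef struct { atomic_int v; } atomic_unchecked_t; /* stats/seq-style */

static int atomic_inc_return_checked(atomic_checked_t *a)
{
	int old = atomic_fetch_add(&a->v, 1);

	if (old == INT_MAX)	/* would wrap: treat as a refcount bug */
		abort();
	return old + 1;
}

static int atomic_inc_return_unchecked(atomic_unchecked_t *a)
{
	/* wraparound is acceptable for sequence/statistics counters */
	return (int)((unsigned)atomic_fetch_add(&a->v, 1) + 1u);
}

int main(void)
{
	atomic_checked_t ref = { 0 };
	atomic_unchecked_t seq = { 0 };

	atomic_inc_return_checked(&ref);
	atomic_inc_return_unchecked(&seq);
	return 0;
}
```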
60570diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
60571index 47aceb4..7d28b1c 100644
60572--- a/fs/btrfs/ioctl.c
60573+++ b/fs/btrfs/ioctl.c
60574@@ -3965,9 +3965,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60575 for (i = 0; i < num_types; i++) {
60576 struct btrfs_space_info *tmp;
60577
60578+ /* Don't copy in more than we allocated */
60579 if (!slot_count)
60580 break;
60581
60582+ slot_count--;
60583+
60584 info = NULL;
60585 rcu_read_lock();
60586 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
60587@@ -3989,10 +3992,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60588 memcpy(dest, &space, sizeof(space));
60589 dest++;
60590 space_args.total_spaces++;
60591- slot_count--;
60592 }
60593- if (!slot_count)
60594- break;
60595 }
60596 up_read(&info->groups_sem);
60597 }
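The btrfs ioctl reshuffle above moves the slot accounting to the top of the loop: the budget is consumed before an entry can be written, so the copy to userspace can never exceed what was allocated even when the inner rcu walk emits several matches per iteration. A minimal sketch of the reserve-before-write pattern (names ours):

```c
#include <assert.h>
#include <stddef.h>

struct entry { int v; };

static size_t fill(struct entry *dst, size_t slots,
		   const int *src, size_t n)
{
	size_t i, out = 0;

	for (i = 0; i < n; i++) {
		if (!slots)
			break;
		slots--;		/* reserve the slot up front */
		dst[out++].v = src[i];
	}
	return out;
}

int main(void)
{
	struct entry buf[2];
	int data[4] = { 1, 2, 3, 4 };

	assert(fill(buf, 2, data, 4) == 2);	/* never overruns buf */
	return 0;
}
```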
60598diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
60599index 8e16bca..6eabd9e 100644
60600--- a/fs/btrfs/super.c
60601+++ b/fs/btrfs/super.c
60602@@ -270,7 +270,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
60603 function, line, errstr);
60604 return;
60605 }
60606- ACCESS_ONCE(trans->transaction->aborted) = errno;
60607+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
60608 /* Wake up anybody who may be waiting on this transaction */
60609 wake_up(&root->fs_info->transaction_wait);
60610 wake_up(&root->fs_info->transaction_blocked_wait);
60611diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
60612index 7869936..7e153dc 100644
60613--- a/fs/btrfs/sysfs.c
60614+++ b/fs/btrfs/sysfs.c
60615@@ -475,7 +475,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
60616 for (set = 0; set < FEAT_MAX; set++) {
60617 int i;
60618 struct attribute *attrs[2];
60619- struct attribute_group agroup = {
60620+ attribute_group_no_const agroup = {
60621 .name = "features",
60622 .attrs = attrs,
60623 };
60624diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
60625index 7f5b41b..e589c13 100644
60626--- a/fs/btrfs/tree-log.h
60627+++ b/fs/btrfs/tree-log.h
60628@@ -41,7 +41,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
60629 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
60630 struct btrfs_trans_handle *trans)
60631 {
60632- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
60633+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
60634 }
60635
60636 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
60637diff --git a/fs/buffer.c b/fs/buffer.c
60638index eba6e4f..af1182c 100644
60639--- a/fs/buffer.c
60640+++ b/fs/buffer.c
60641@@ -3429,7 +3429,7 @@ void __init buffer_init(void)
60642 bh_cachep = kmem_cache_create("buffer_head",
60643 sizeof(struct buffer_head), 0,
60644 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
60645- SLAB_MEM_SPREAD),
60646+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
60647 NULL);
60648
60649 /*
60650diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
60651index d749731..dd333a6 100644
60652--- a/fs/cachefiles/bind.c
60653+++ b/fs/cachefiles/bind.c
60654@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
60655 args);
60656
60657 /* start by checking things over */
60658- ASSERT(cache->fstop_percent >= 0 &&
60659- cache->fstop_percent < cache->fcull_percent &&
60660+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
60661 cache->fcull_percent < cache->frun_percent &&
60662 cache->frun_percent < 100);
60663
60664- ASSERT(cache->bstop_percent >= 0 &&
60665- cache->bstop_percent < cache->bcull_percent &&
60666+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
60667 cache->bcull_percent < cache->brun_percent &&
60668 cache->brun_percent < 100);
60669
60670diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
60671index b078d30..db23012 100644
60672--- a/fs/cachefiles/daemon.c
60673+++ b/fs/cachefiles/daemon.c
60674@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
60675 if (n > buflen)
60676 return -EMSGSIZE;
60677
60678- if (copy_to_user(_buffer, buffer, n) != 0)
60679+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
60680 return -EFAULT;
60681
60682 return n;
60683@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
60684 if (test_bit(CACHEFILES_DEAD, &cache->flags))
60685 return -EIO;
60686
60687- if (datalen < 0 || datalen > PAGE_SIZE - 1)
60688+ if (datalen > PAGE_SIZE - 1)
60689 return -EOPNOTSUPP;
60690
60691 /* drag the command string into the kernel so we can parse it */
60692@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
60693 if (args[0] != '%' || args[1] != '\0')
60694 return -EINVAL;
60695
60696- if (fstop < 0 || fstop >= cache->fcull_percent)
60697+ if (fstop >= cache->fcull_percent)
60698 return cachefiles_daemon_range_error(cache, args);
60699
60700 cache->fstop_percent = fstop;
60701@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
60702 if (args[0] != '%' || args[1] != '\0')
60703 return -EINVAL;
60704
60705- if (bstop < 0 || bstop >= cache->bcull_percent)
60706+ if (bstop >= cache->bcull_percent)
60707 return cachefiles_daemon_range_error(cache, args);
60708
60709 cache->bstop_percent = bstop;
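The deleted halves of these cachefiles conditions compared unsigned fields (fstop_percent, bstop_percent, datalen) against zero; such tests are always false for unsigned types — gcc flags them with -Wtype-limits — so only the upper bounds carry any meaning. A two-assertion illustration:

```c
#include <assert.h>

int main(void)
{
	unsigned fstop = 0, fcull_percent = 7;

	/* an unsigned value can never be below zero ... */
	assert(!(fstop < 0u));
	/* ... so the surviving upper-bound check is the whole test */
	assert(fstop < fcull_percent);
	return 0;
}
```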
60710diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
60711index 3d50998..0550d67 100644
60712--- a/fs/cachefiles/internal.h
60713+++ b/fs/cachefiles/internal.h
60714@@ -66,7 +66,7 @@ struct cachefiles_cache {
60715 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
60716 struct rb_root active_nodes; /* active nodes (can't be culled) */
60717 rwlock_t active_lock; /* lock for active_nodes */
60718- atomic_t gravecounter; /* graveyard uniquifier */
60719+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
60720 unsigned frun_percent; /* when to stop culling (% files) */
60721 unsigned fcull_percent; /* when to start culling (% files) */
60722 unsigned fstop_percent; /* when to stop allocating (% files) */
60723@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
60724 * proc.c
60725 */
60726 #ifdef CONFIG_CACHEFILES_HISTOGRAM
60727-extern atomic_t cachefiles_lookup_histogram[HZ];
60728-extern atomic_t cachefiles_mkdir_histogram[HZ];
60729-extern atomic_t cachefiles_create_histogram[HZ];
60730+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60731+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60732+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
60733
60734 extern int __init cachefiles_proc_init(void);
60735 extern void cachefiles_proc_cleanup(void);
60736 static inline
60737-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
60738+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
60739 {
60740 unsigned long jif = jiffies - start_jif;
60741 if (jif >= HZ)
60742 jif = HZ - 1;
60743- atomic_inc(&histogram[jif]);
60744+ atomic_inc_unchecked(&histogram[jif]);
60745 }
60746
60747 #else
60748diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
60749index 5bf2b41..85b93f9 100644
60750--- a/fs/cachefiles/namei.c
60751+++ b/fs/cachefiles/namei.c
60752@@ -312,7 +312,7 @@ try_again:
60753 /* first step is to make up a grave dentry in the graveyard */
60754 sprintf(nbuffer, "%08x%08x",
60755 (uint32_t) get_seconds(),
60756- (uint32_t) atomic_inc_return(&cache->gravecounter));
60757+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
60758
60759 /* do the multiway lock magic */
60760 trap = lock_rename(cache->graveyard, dir);
60761diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
60762index eccd339..4c1d995 100644
60763--- a/fs/cachefiles/proc.c
60764+++ b/fs/cachefiles/proc.c
60765@@ -14,9 +14,9 @@
60766 #include <linux/seq_file.h>
60767 #include "internal.h"
60768
60769-atomic_t cachefiles_lookup_histogram[HZ];
60770-atomic_t cachefiles_mkdir_histogram[HZ];
60771-atomic_t cachefiles_create_histogram[HZ];
60772+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60773+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60774+atomic_unchecked_t cachefiles_create_histogram[HZ];
60775
60776 /*
60777 * display the latency histogram
60778@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
60779 return 0;
60780 default:
60781 index = (unsigned long) v - 3;
60782- x = atomic_read(&cachefiles_lookup_histogram[index]);
60783- y = atomic_read(&cachefiles_mkdir_histogram[index]);
60784- z = atomic_read(&cachefiles_create_histogram[index]);
60785+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
60786+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
60787+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
60788 if (x == 0 && y == 0 && z == 0)
60789 return 0;
60790
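The cachefiles_hist() helper whose counters are converted above buckets an elapsed time in jiffies into HZ slots, clamping anything of a second or more into the last bucket; the _unchecked conversion only changes the counter type, not this logic. A userspace model of the bucketing, with an illustrative HZ:

```c
#include <stdio.h>

#define HZ 100

static unsigned long histogram[HZ];

static void hist(unsigned long start_jif, unsigned long now_jif)
{
	unsigned long jif = now_jif - start_jif;

	if (jif >= HZ)
		jif = HZ - 1;	/* slower than 1s lands in the last bucket */
	histogram[jif]++;
}

int main(void)
{
	hist(0, 5);
	hist(0, 500);		/* clamped into bucket HZ-1 */
	printf("bucket 5 = %lu, bucket %d = %lu\n",
	       histogram[5], HZ - 1, histogram[HZ - 1]);
	return 0;
}
```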
60791diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
60792index 4b1fb5c..0d2a699 100644
60793--- a/fs/cachefiles/rdwr.c
60794+++ b/fs/cachefiles/rdwr.c
60795@@ -943,7 +943,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
60796 old_fs = get_fs();
60797 set_fs(KERNEL_DS);
60798 ret = file->f_op->write(
60799- file, (const void __user *) data, len, &pos);
60800+ file, (const void __force_user *) data, len, &pos);
60801 set_fs(old_fs);
60802 kunmap(page);
60803 file_end_write(file);
60804diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
60805index c29d6ae..a56c4ae 100644
60806--- a/fs/ceph/dir.c
60807+++ b/fs/ceph/dir.c
60808@@ -250,7 +250,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
60809 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
60810 struct ceph_mds_client *mdsc = fsc->mdsc;
60811 unsigned frag = fpos_frag(ctx->pos);
60812- int off = fpos_off(ctx->pos);
60813+ unsigned int off = fpos_off(ctx->pos);
60814 int err;
60815 u32 ftype;
60816 struct ceph_mds_reply_info_parsed *rinfo;
60817diff --git a/fs/ceph/super.c b/fs/ceph/super.c
60818index 06150fd..192061b 100644
60819--- a/fs/ceph/super.c
60820+++ b/fs/ceph/super.c
60821@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
60822 /*
60823 * construct our own bdi so we can control readahead, etc.
60824 */
60825-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
60826+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
60827
60828 static int ceph_register_bdi(struct super_block *sb,
60829 struct ceph_fs_client *fsc)
60830@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
60831 default_backing_dev_info.ra_pages;
60832
60833 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
60834- atomic_long_inc_return(&bdi_seq));
60835+ atomic_long_inc_return_unchecked(&bdi_seq));
60836 if (!err)
60837 sb->s_bdi = &fsc->backing_dev_info;
60838 return err;
60839diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
60840index f3ac415..3d2420c 100644
60841--- a/fs/cifs/cifs_debug.c
60842+++ b/fs/cifs/cifs_debug.c
60843@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60844
60845 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
60846 #ifdef CONFIG_CIFS_STATS2
60847- atomic_set(&totBufAllocCount, 0);
60848- atomic_set(&totSmBufAllocCount, 0);
60849+ atomic_set_unchecked(&totBufAllocCount, 0);
60850+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60851 #endif /* CONFIG_CIFS_STATS2 */
60852 spin_lock(&cifs_tcp_ses_lock);
60853 list_for_each(tmp1, &cifs_tcp_ses_list) {
60854@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60855 tcon = list_entry(tmp3,
60856 struct cifs_tcon,
60857 tcon_list);
60858- atomic_set(&tcon->num_smbs_sent, 0);
60859+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
60860 if (server->ops->clear_stats)
60861 server->ops->clear_stats(tcon);
60862 }
60863@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60864 smBufAllocCount.counter, cifs_min_small);
60865 #ifdef CONFIG_CIFS_STATS2
60866 seq_printf(m, "Total Large %d Small %d Allocations\n",
60867- atomic_read(&totBufAllocCount),
60868- atomic_read(&totSmBufAllocCount));
60869+ atomic_read_unchecked(&totBufAllocCount),
60870+ atomic_read_unchecked(&totSmBufAllocCount));
60871 #endif /* CONFIG_CIFS_STATS2 */
60872
60873 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
60874@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60875 if (tcon->need_reconnect)
60876 seq_puts(m, "\tDISCONNECTED ");
60877 seq_printf(m, "\nSMBs: %d",
60878- atomic_read(&tcon->num_smbs_sent));
60879+ atomic_read_unchecked(&tcon->num_smbs_sent));
60880 if (server->ops->print_stats)
60881 server->ops->print_stats(m, tcon);
60882 }
60883diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
60884index 8883980..c8ade72 100644
60885--- a/fs/cifs/cifsfs.c
60886+++ b/fs/cifs/cifsfs.c
60887@@ -1072,7 +1072,7 @@ cifs_init_request_bufs(void)
60888 */
60889 cifs_req_cachep = kmem_cache_create("cifs_request",
60890 CIFSMaxBufSize + max_hdr_size, 0,
60891- SLAB_HWCACHE_ALIGN, NULL);
60892+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
60893 if (cifs_req_cachep == NULL)
60894 return -ENOMEM;
60895
60896@@ -1099,7 +1099,7 @@ cifs_init_request_bufs(void)
60897 efficient to alloc 1 per page off the slab compared to 17K (5page)
60898 alloc of large cifs buffers even when page debugging is on */
60899 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
60900- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
60901+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
60902 NULL);
60903 if (cifs_sm_req_cachep == NULL) {
60904 mempool_destroy(cifs_req_poolp);
60905@@ -1184,8 +1184,8 @@ init_cifs(void)
60906 atomic_set(&bufAllocCount, 0);
60907 atomic_set(&smBufAllocCount, 0);
60908 #ifdef CONFIG_CIFS_STATS2
60909- atomic_set(&totBufAllocCount, 0);
60910- atomic_set(&totSmBufAllocCount, 0);
60911+ atomic_set_unchecked(&totBufAllocCount, 0);
60912+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60913 #endif /* CONFIG_CIFS_STATS2 */
60914
60915 atomic_set(&midCount, 0);
60916diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
60917index de6aed8..a0a76fd 100644
60918--- a/fs/cifs/cifsglob.h
60919+++ b/fs/cifs/cifsglob.h
60920@@ -807,35 +807,35 @@ struct cifs_tcon {
60921 __u16 Flags; /* optional support bits */
60922 enum statusEnum tidStatus;
60923 #ifdef CONFIG_CIFS_STATS
60924- atomic_t num_smbs_sent;
60925+ atomic_unchecked_t num_smbs_sent;
60926 union {
60927 struct {
60928- atomic_t num_writes;
60929- atomic_t num_reads;
60930- atomic_t num_flushes;
60931- atomic_t num_oplock_brks;
60932- atomic_t num_opens;
60933- atomic_t num_closes;
60934- atomic_t num_deletes;
60935- atomic_t num_mkdirs;
60936- atomic_t num_posixopens;
60937- atomic_t num_posixmkdirs;
60938- atomic_t num_rmdirs;
60939- atomic_t num_renames;
60940- atomic_t num_t2renames;
60941- atomic_t num_ffirst;
60942- atomic_t num_fnext;
60943- atomic_t num_fclose;
60944- atomic_t num_hardlinks;
60945- atomic_t num_symlinks;
60946- atomic_t num_locks;
60947- atomic_t num_acl_get;
60948- atomic_t num_acl_set;
60949+ atomic_unchecked_t num_writes;
60950+ atomic_unchecked_t num_reads;
60951+ atomic_unchecked_t num_flushes;
60952+ atomic_unchecked_t num_oplock_brks;
60953+ atomic_unchecked_t num_opens;
60954+ atomic_unchecked_t num_closes;
60955+ atomic_unchecked_t num_deletes;
60956+ atomic_unchecked_t num_mkdirs;
60957+ atomic_unchecked_t num_posixopens;
60958+ atomic_unchecked_t num_posixmkdirs;
60959+ atomic_unchecked_t num_rmdirs;
60960+ atomic_unchecked_t num_renames;
60961+ atomic_unchecked_t num_t2renames;
60962+ atomic_unchecked_t num_ffirst;
60963+ atomic_unchecked_t num_fnext;
60964+ atomic_unchecked_t num_fclose;
60965+ atomic_unchecked_t num_hardlinks;
60966+ atomic_unchecked_t num_symlinks;
60967+ atomic_unchecked_t num_locks;
60968+ atomic_unchecked_t num_acl_get;
60969+ atomic_unchecked_t num_acl_set;
60970 } cifs_stats;
60971 #ifdef CONFIG_CIFS_SMB2
60972 struct {
60973- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60974- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60975+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60976+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60977 } smb2_stats;
60978 #endif /* CONFIG_CIFS_SMB2 */
60979 } stats;
60980@@ -1172,7 +1172,7 @@ convert_delimiter(char *path, char delim)
60981 }
60982
60983 #ifdef CONFIG_CIFS_STATS
60984-#define cifs_stats_inc atomic_inc
60985+#define cifs_stats_inc atomic_inc_unchecked
60986
60987 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60988 unsigned int bytes)
60989@@ -1538,8 +1538,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60990 /* Various Debug counters */
60991 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60992 #ifdef CONFIG_CIFS_STATS2
60993-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60994-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60995+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60996+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60997 #endif
60998 GLOBAL_EXTERN atomic_t smBufAllocCount;
60999 GLOBAL_EXTERN atomic_t midCount;
61000diff --git a/fs/cifs/file.c b/fs/cifs/file.c
61001index e90a1e9..908699d 100644
61002--- a/fs/cifs/file.c
61003+++ b/fs/cifs/file.c
61004@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
61005 index = mapping->writeback_index; /* Start from prev offset */
61006 end = -1;
61007 } else {
61008- index = wbc->range_start >> PAGE_CACHE_SHIFT;
61009- end = wbc->range_end >> PAGE_CACHE_SHIFT;
61010- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
61011+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
61012 range_whole = true;
61013+ index = 0;
61014+ end = ULONG_MAX;
61015+ } else {
61016+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
61017+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
61018+ }
61019 scanned = true;
61020 }
61021 retry:
61022diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
61023index 3b0c62e..f7d090c 100644
61024--- a/fs/cifs/misc.c
61025+++ b/fs/cifs/misc.c
61026@@ -170,7 +170,7 @@ cifs_buf_get(void)
61027 memset(ret_buf, 0, buf_size + 3);
61028 atomic_inc(&bufAllocCount);
61029 #ifdef CONFIG_CIFS_STATS2
61030- atomic_inc(&totBufAllocCount);
61031+ atomic_inc_unchecked(&totBufAllocCount);
61032 #endif /* CONFIG_CIFS_STATS2 */
61033 }
61034
61035@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
61036 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
61037 atomic_inc(&smBufAllocCount);
61038 #ifdef CONFIG_CIFS_STATS2
61039- atomic_inc(&totSmBufAllocCount);
61040+ atomic_inc_unchecked(&totSmBufAllocCount);
61041 #endif /* CONFIG_CIFS_STATS2 */
61042
61043 }
61044diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
61045index d1fdfa8..94558f8 100644
61046--- a/fs/cifs/smb1ops.c
61047+++ b/fs/cifs/smb1ops.c
61048@@ -626,27 +626,27 @@ static void
61049 cifs_clear_stats(struct cifs_tcon *tcon)
61050 {
61051 #ifdef CONFIG_CIFS_STATS
61052- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
61053- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
61054- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
61055- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61056- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
61057- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
61058- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61059- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
61060- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
61061- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
61062- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
61063- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
61064- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
61065- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
61066- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
61067- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
61068- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
61069- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
61070- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
61071- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
61072- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
61073+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
61074+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
61075+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
61076+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61077+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
61078+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
61079+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61080+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
61081+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
61082+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
61083+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
61084+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
61085+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
61086+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
61087+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
61088+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
61089+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
61090+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
61091+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
61092+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
61093+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
61094 #endif
61095 }
61096
61097@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61098 {
61099 #ifdef CONFIG_CIFS_STATS
61100 seq_printf(m, " Oplocks breaks: %d",
61101- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
61102+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
61103 seq_printf(m, "\nReads: %d Bytes: %llu",
61104- atomic_read(&tcon->stats.cifs_stats.num_reads),
61105+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
61106 (long long)(tcon->bytes_read));
61107 seq_printf(m, "\nWrites: %d Bytes: %llu",
61108- atomic_read(&tcon->stats.cifs_stats.num_writes),
61109+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
61110 (long long)(tcon->bytes_written));
61111 seq_printf(m, "\nFlushes: %d",
61112- atomic_read(&tcon->stats.cifs_stats.num_flushes));
61113+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
61114 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
61115- atomic_read(&tcon->stats.cifs_stats.num_locks),
61116- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
61117- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
61118+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
61119+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
61120+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
61121 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
61122- atomic_read(&tcon->stats.cifs_stats.num_opens),
61123- atomic_read(&tcon->stats.cifs_stats.num_closes),
61124- atomic_read(&tcon->stats.cifs_stats.num_deletes));
61125+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
61126+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
61127+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
61128 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
61129- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
61130- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
61131+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
61132+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
61133 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
61134- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
61135- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
61136+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
61137+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
61138 seq_printf(m, "\nRenames: %d T2 Renames %d",
61139- atomic_read(&tcon->stats.cifs_stats.num_renames),
61140- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
61141+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
61142+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
61143 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
61144- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
61145- atomic_read(&tcon->stats.cifs_stats.num_fnext),
61146- atomic_read(&tcon->stats.cifs_stats.num_fclose));
61147+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
61148+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
61149+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
61150 #endif
61151 }
61152
61153diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
61154index 787844b..8e7bc7d 100644
61155--- a/fs/cifs/smb2ops.c
61156+++ b/fs/cifs/smb2ops.c
61157@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
61158 #ifdef CONFIG_CIFS_STATS
61159 int i;
61160 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
61161- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61162- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61163+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61164+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61165 }
61166 #endif
61167 }
61168@@ -405,65 +405,65 @@ static void
61169 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61170 {
61171 #ifdef CONFIG_CIFS_STATS
61172- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61173- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61174+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61175+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61176 seq_printf(m, "\nNegotiates: %d sent %d failed",
61177- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
61178- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
61179+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
61180+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
61181 seq_printf(m, "\nSessionSetups: %d sent %d failed",
61182- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
61183- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
61184+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
61185+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
61186 seq_printf(m, "\nLogoffs: %d sent %d failed",
61187- atomic_read(&sent[SMB2_LOGOFF_HE]),
61188- atomic_read(&failed[SMB2_LOGOFF_HE]));
61189+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
61190+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
61191 seq_printf(m, "\nTreeConnects: %d sent %d failed",
61192- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
61193- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
61194+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
61195+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
61196 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
61197- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
61198- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
61199+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
61200+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
61201 seq_printf(m, "\nCreates: %d sent %d failed",
61202- atomic_read(&sent[SMB2_CREATE_HE]),
61203- atomic_read(&failed[SMB2_CREATE_HE]));
61204+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
61205+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
61206 seq_printf(m, "\nCloses: %d sent %d failed",
61207- atomic_read(&sent[SMB2_CLOSE_HE]),
61208- atomic_read(&failed[SMB2_CLOSE_HE]));
61209+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
61210+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
61211 seq_printf(m, "\nFlushes: %d sent %d failed",
61212- atomic_read(&sent[SMB2_FLUSH_HE]),
61213- atomic_read(&failed[SMB2_FLUSH_HE]));
61214+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
61215+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
61216 seq_printf(m, "\nReads: %d sent %d failed",
61217- atomic_read(&sent[SMB2_READ_HE]),
61218- atomic_read(&failed[SMB2_READ_HE]));
61219+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
61220+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
61221 seq_printf(m, "\nWrites: %d sent %d failed",
61222- atomic_read(&sent[SMB2_WRITE_HE]),
61223- atomic_read(&failed[SMB2_WRITE_HE]));
61224+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
61225+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
61226 seq_printf(m, "\nLocks: %d sent %d failed",
61227- atomic_read(&sent[SMB2_LOCK_HE]),
61228- atomic_read(&failed[SMB2_LOCK_HE]));
61229+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
61230+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
61231 seq_printf(m, "\nIOCTLs: %d sent %d failed",
61232- atomic_read(&sent[SMB2_IOCTL_HE]),
61233- atomic_read(&failed[SMB2_IOCTL_HE]));
61234+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
61235+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
61236 seq_printf(m, "\nCancels: %d sent %d failed",
61237- atomic_read(&sent[SMB2_CANCEL_HE]),
61238- atomic_read(&failed[SMB2_CANCEL_HE]));
61239+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
61240+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
61241 seq_printf(m, "\nEchos: %d sent %d failed",
61242- atomic_read(&sent[SMB2_ECHO_HE]),
61243- atomic_read(&failed[SMB2_ECHO_HE]));
61244+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
61245+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
61246 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
61247- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
61248- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
61249+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
61250+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
61251 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
61252- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
61253- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
61254+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
61255+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
61256 seq_printf(m, "\nQueryInfos: %d sent %d failed",
61257- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
61258- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
61259+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
61260+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
61261 seq_printf(m, "\nSetInfos: %d sent %d failed",
61262- atomic_read(&sent[SMB2_SET_INFO_HE]),
61263- atomic_read(&failed[SMB2_SET_INFO_HE]));
61264+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
61265+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
61266 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
61267- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
61268- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
61269+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
61270+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
61271 #endif
61272 }
61273
61274diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
61275index b0b260d..c8927e1 100644
61276--- a/fs/cifs/smb2pdu.c
61277+++ b/fs/cifs/smb2pdu.c
61278@@ -2105,8 +2105,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
61279 default:
61280 cifs_dbg(VFS, "info level %u isn't supported\n",
61281 srch_inf->info_level);
61282- rc = -EINVAL;
61283- goto qdir_exit;
61284+ return -EINVAL;
61285 }
61286
61287 req->FileIndex = cpu_to_le32(index);
61288diff --git a/fs/coda/cache.c b/fs/coda/cache.c
61289index 1da168c..8bc7ff6 100644
61290--- a/fs/coda/cache.c
61291+++ b/fs/coda/cache.c
61292@@ -24,7 +24,7 @@
61293 #include "coda_linux.h"
61294 #include "coda_cache.h"
61295
61296-static atomic_t permission_epoch = ATOMIC_INIT(0);
61297+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
61298
61299 /* replace or extend an acl cache hit */
61300 void coda_cache_enter(struct inode *inode, int mask)
61301@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
61302 struct coda_inode_info *cii = ITOC(inode);
61303
61304 spin_lock(&cii->c_lock);
61305- cii->c_cached_epoch = atomic_read(&permission_epoch);
61306+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
61307 if (!uid_eq(cii->c_uid, current_fsuid())) {
61308 cii->c_uid = current_fsuid();
61309 cii->c_cached_perm = mask;
61310@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
61311 {
61312 struct coda_inode_info *cii = ITOC(inode);
61313 spin_lock(&cii->c_lock);
61314- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
61315+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
61316 spin_unlock(&cii->c_lock);
61317 }
61318
61319 /* remove all acl caches */
61320 void coda_cache_clear_all(struct super_block *sb)
61321 {
61322- atomic_inc(&permission_epoch);
61323+ atomic_inc_unchecked(&permission_epoch);
61324 }
61325
61326
61327@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
61328 spin_lock(&cii->c_lock);
61329 hit = (mask & cii->c_cached_perm) == mask &&
61330 uid_eq(cii->c_uid, current_fsuid()) &&
61331- cii->c_cached_epoch == atomic_read(&permission_epoch);
61332+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
61333 spin_unlock(&cii->c_lock);
61334
61335 return hit;
61336diff --git a/fs/compat.c b/fs/compat.c
61337index 66d3d3c..9c10175 100644
61338--- a/fs/compat.c
61339+++ b/fs/compat.c
61340@@ -54,7 +54,7 @@
61341 #include <asm/ioctls.h>
61342 #include "internal.h"
61343
61344-int compat_log = 1;
61345+int compat_log = 0;
61346
61347 int compat_printk(const char *fmt, ...)
61348 {
61349@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
61350
61351 set_fs(KERNEL_DS);
61352 /* The __user pointer cast is valid because of the set_fs() */
61353- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
61354+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
61355 set_fs(oldfs);
61356 /* truncating is ok because it's a user address */
61357 if (!ret)
61358@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
61359 goto out;
61360
61361 ret = -EINVAL;
61362- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
61363+ if (nr_segs > UIO_MAXIOV)
61364 goto out;
61365 if (nr_segs > fast_segs) {
61366 ret = -ENOMEM;
61367@@ -850,6 +850,7 @@ struct compat_old_linux_dirent {
61368 struct compat_readdir_callback {
61369 struct dir_context ctx;
61370 struct compat_old_linux_dirent __user *dirent;
61371+ struct file * file;
61372 int result;
61373 };
61374
61375@@ -867,6 +868,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
61376 buf->result = -EOVERFLOW;
61377 return -EOVERFLOW;
61378 }
61379+
61380+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61381+ return 0;
61382+
61383 buf->result++;
61384 dirent = buf->dirent;
61385 if (!access_ok(VERIFY_WRITE, dirent,
61386@@ -898,6 +903,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61387 if (!f.file)
61388 return -EBADF;
61389
61390+ buf.file = f.file;
61391 error = iterate_dir(f.file, &buf.ctx);
61392 if (buf.result)
61393 error = buf.result;
61394@@ -917,6 +923,7 @@ struct compat_getdents_callback {
61395 struct dir_context ctx;
61396 struct compat_linux_dirent __user *current_dir;
61397 struct compat_linux_dirent __user *previous;
61398+ struct file * file;
61399 int count;
61400 int error;
61401 };
61402@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
61403 buf->error = -EOVERFLOW;
61404 return -EOVERFLOW;
61405 }
61406+
61407+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61408+ return 0;
61409+
61410 dirent = buf->previous;
61411 if (dirent) {
61412 if (__put_user(offset, &dirent->d_off))
61413@@ -983,6 +994,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
61414 if (!f.file)
61415 return -EBADF;
61416
61417+ buf.file = f.file;
61418 error = iterate_dir(f.file, &buf.ctx);
61419 if (error >= 0)
61420 error = buf.error;
61421@@ -1003,6 +1015,7 @@ struct compat_getdents_callback64 {
61422 struct dir_context ctx;
61423 struct linux_dirent64 __user *current_dir;
61424 struct linux_dirent64 __user *previous;
61425+ struct file * file;
61426 int count;
61427 int error;
61428 };
61429@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
61430 buf->error = -EINVAL; /* only used if we fail.. */
61431 if (reclen > buf->count)
61432 return -EINVAL;
61433+
61434+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61435+ return 0;
61436+
61437 dirent = buf->previous;
61438
61439 if (dirent) {
61440@@ -1068,6 +1085,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61441 if (!f.file)
61442 return -EBADF;
61443
61444+ buf.file = f.file;
61445 error = iterate_dir(f.file, &buf.ctx);
61446 if (error >= 0)
61447 error = buf.error;
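All three compat readdir paths above get the same treatment: the wrapper struct embeds the generic dir_context as its first member, and a struct file pointer is stashed beside it so the fill callback can ask gr_acl_handle_filldir() whether to hide the entry (returning 0 before the dirent is written skips it silently while iteration continues). A userspace sketch of this embed-and-downcast pattern — the types and names are ours, not the kernel API:

```c
#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct dir_context { int (*actor)(struct dir_context *, const char *); };

struct readdir_callback {
	struct dir_context ctx;	/* must stay first, as in the patch */
	const char *file;	/* extra per-call state */
	int result;
};

static int fillonedir(struct dir_context *ctx, const char *name)
{
	struct readdir_callback *buf = (struct readdir_callback *)ctx;

	if (strcmp(name, "secret") == 0)
		return 0;	/* filtered: pretend the entry is absent */
	printf("%s: %s\n", buf->file, name);
	buf->result++;
	return 1;
}

int main(void)
{
	struct readdir_callback buf = { { fillonedir }, "demo-dir", 0 };
	const char *entries[] = { "a", "secret", "b" };

	for (size_t i = 0; i < 3; i++)
		buf.ctx.actor(&buf.ctx, entries[i]);
	return buf.result == 2 ? 0 : 1;
}
```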
61448diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
61449index 4d24d17..4f8c09e 100644
61450--- a/fs/compat_binfmt_elf.c
61451+++ b/fs/compat_binfmt_elf.c
61452@@ -30,11 +30,13 @@
61453 #undef elf_phdr
61454 #undef elf_shdr
61455 #undef elf_note
61456+#undef elf_dyn
61457 #undef elf_addr_t
61458 #define elfhdr elf32_hdr
61459 #define elf_phdr elf32_phdr
61460 #define elf_shdr elf32_shdr
61461 #define elf_note elf32_note
61462+#define elf_dyn Elf32_Dyn
61463 #define elf_addr_t Elf32_Addr
61464
61465 /*
61466diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
61467index e822890..fed89d9 100644
61468--- a/fs/compat_ioctl.c
61469+++ b/fs/compat_ioctl.c
61470@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
61471 return -EFAULT;
61472 if (__get_user(udata, &ss32->iomem_base))
61473 return -EFAULT;
61474- ss.iomem_base = compat_ptr(udata);
61475+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
61476 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
61477 __get_user(ss.port_high, &ss32->port_high))
61478 return -EFAULT;
61479@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
61480 for (i = 0; i < nmsgs; i++) {
61481 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
61482 return -EFAULT;
61483- if (get_user(datap, &umsgs[i].buf) ||
61484- put_user(compat_ptr(datap), &tmsgs[i].buf))
61485+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
61486+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
61487 return -EFAULT;
61488 }
61489 return sys_ioctl(fd, cmd, (unsigned long)tdata);
61490@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
61491 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
61492 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
61493 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
61494- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
61495+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
61496 return -EFAULT;
61497
61498 return ioctl_preallocate(file, p);
61499@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
61500 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
61501 {
61502 unsigned int a, b;
61503- a = *(unsigned int *)p;
61504- b = *(unsigned int *)q;
61505+ a = *(const unsigned int *)p;
61506+ b = *(const unsigned int *)q;
61507 if (a > b)
61508 return 1;
61509 if (a < b)
61510diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
61511index 668dcab..daebcd6 100644
61512--- a/fs/configfs/dir.c
61513+++ b/fs/configfs/dir.c
61514@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61515 }
61516 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
61517 struct configfs_dirent *next;
61518- const char *name;
61519+ const unsigned char * name;
61520+ char d_name[sizeof(next->s_dentry->d_iname)];
61521 int len;
61522 struct inode *inode = NULL;
61523
61524@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61525 continue;
61526
61527 name = configfs_get_name(next);
61528- len = strlen(name);
61529+ if (next->s_dentry && name == next->s_dentry->d_iname) {
61530+ len = next->s_dentry->d_name.len;
61531+ memcpy(d_name, name, len);
61532+ name = d_name;
61533+ } else
61534+ len = strlen(name);
61535
61536 /*
61537 * We'll have a dentry and an inode for
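The configfs change guards against a concurrent rename: when the name points into the dentry's inline storage (d_iname), it is copied into a local buffer using the recorded length before use, so readdir never works on a string that is being rewritten underneath it. A minimal sketch of the snapshot idiom, with an illustrative buffer size:

```c
#include <string.h>
#include <stdio.h>

#define DNAME_INLINE_LEN 32

int main(void)
{
	char shared_name[DNAME_INLINE_LEN] = "volatile-name"; /* may change */
	char d_name[DNAME_INLINE_LEN];
	size_t len = strnlen(shared_name, sizeof(shared_name) - 1);

	memcpy(d_name, shared_name, len);
	d_name[len] = '\0';
	/* from here on, only the stable local copy is used */
	printf("%.*s (%zu)\n", (int)len, d_name, len);
	return 0;
}
```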
61538diff --git a/fs/coredump.c b/fs/coredump.c
61539index a93f7e6..d58bcbe 100644
61540--- a/fs/coredump.c
61541+++ b/fs/coredump.c
61542@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
61543 struct pipe_inode_info *pipe = file->private_data;
61544
61545 pipe_lock(pipe);
61546- pipe->readers++;
61547- pipe->writers--;
61548+ atomic_inc(&pipe->readers);
61549+ atomic_dec(&pipe->writers);
61550 wake_up_interruptible_sync(&pipe->wait);
61551 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61552 pipe_unlock(pipe);
61553@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
61554 * We actually want wait_event_freezable() but then we need
61555 * to clear TIF_SIGPENDING and improve dump_interrupted().
61556 */
61557- wait_event_interruptible(pipe->wait, pipe->readers == 1);
61558+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
61559
61560 pipe_lock(pipe);
61561- pipe->readers--;
61562- pipe->writers++;
61563+ atomic_dec(&pipe->readers);
61564+ atomic_inc(&pipe->writers);
61565 pipe_unlock(pipe);
61566 }
61567
61568@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
61569 struct files_struct *displaced;
61570 bool need_nonrelative = false;
61571 bool core_dumped = false;
61572- static atomic_t core_dump_count = ATOMIC_INIT(0);
61573+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
61574+ long signr = siginfo->si_signo;
61575+ int dumpable;
61576 struct coredump_params cprm = {
61577 .siginfo = siginfo,
61578 .regs = signal_pt_regs(),
61579@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
61580 .mm_flags = mm->flags,
61581 };
61582
61583- audit_core_dumps(siginfo->si_signo);
61584+ audit_core_dumps(signr);
61585+
61586+ dumpable = __get_dumpable(cprm.mm_flags);
61587+
61588+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
61589+ gr_handle_brute_attach(dumpable);
61590
61591 binfmt = mm->binfmt;
61592 if (!binfmt || !binfmt->core_dump)
61593 goto fail;
61594- if (!__get_dumpable(cprm.mm_flags))
61595+ if (!dumpable)
61596 goto fail;
61597
61598 cred = prepare_creds();
61599@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
61600 need_nonrelative = true;
61601 }
61602
61603- retval = coredump_wait(siginfo->si_signo, &core_state);
61604+ retval = coredump_wait(signr, &core_state);
61605 if (retval < 0)
61606 goto fail_creds;
61607
61608@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
61609 }
61610 cprm.limit = RLIM_INFINITY;
61611
61612- dump_count = atomic_inc_return(&core_dump_count);
61613+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
61614 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
61615 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
61616 task_tgid_vnr(current), current->comm);
61617@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
61618 } else {
61619 struct inode *inode;
61620
61621+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
61622+
61623 if (cprm.limit < binfmt->min_coredump)
61624 goto fail_unlock;
61625
61626@@ -673,7 +682,7 @@ close_fail:
61627 filp_close(cprm.file, NULL);
61628 fail_dropcount:
61629 if (ispipe)
61630- atomic_dec(&core_dump_count);
61631+ atomic_dec_unchecked(&core_dump_count);
61632 fail_unlock:
61633 kfree(cn.corename);
61634 coredump_finish(mm, core_dumped);
61635@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
61636 struct file *file = cprm->file;
61637 loff_t pos = file->f_pos;
61638 ssize_t n;
61639+
61640+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
61641 if (cprm->written + nr > cprm->limit)
61642 return 0;
61643 while (nr) {
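
The coredump hunks convert purely statistical counters from atomic_t to atomic_unchecked_t: under PAX_REFCOUNT the ordinary atomic ops trap on overflow, so counters that are not reference counts must use a variant that is allowed to wrap. A rough sketch of what the unchecked type reduces to when the refcount hardening is disabled (this mirrors that fallback, it is not the hardened implementation):

/* Statistical counter type: overflow is tolerated, never trapped. */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* GCC builtin, may wrap */
}

static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_sub(&v->counter, 1);
}

static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return __sync_add_and_fetch(&v->counter, 1);
}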
61644diff --git a/fs/dcache.c b/fs/dcache.c
61645index 06f6585..f95a6d1 100644
61646--- a/fs/dcache.c
61647+++ b/fs/dcache.c
61648@@ -1445,7 +1445,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61649 */
61650 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
61651 if (name->len > DNAME_INLINE_LEN-1) {
61652- dname = kmalloc(name->len + 1, GFP_KERNEL);
61653+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
61654 if (!dname) {
61655 kmem_cache_free(dentry_cache, dentry);
61656 return NULL;
61657@@ -3413,7 +3413,8 @@ void __init vfs_caches_init(unsigned long mempages)
61658 mempages -= reserve;
61659
61660 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
61661- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
61662+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
61663+ SLAB_NO_SANITIZE, NULL);
61664
61665 dcache_init();
61666 inode_init();
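
The dcache change rounds the external-name allocation up to a word boundary, so word-at-a-time readers of the name (and the slab sanitizer) never step past the end of the heap object. One common formulation of round_up() for a power-of-two alignment, shown stand-alone:

/* (x + a - 1) & ~(a - 1): round x up to the next multiple of a,
 * where a is a power of two. */
#include <assert.h>

#define round_up(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	assert(round_up(5, 8) == 8);
	assert(round_up(8, 8) == 8);	/* already aligned: unchanged */
	assert(round_up(17, 8) == 24);
	return 0;
}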
61667diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
61668index 8c41b52..145b1b9 100644
61669--- a/fs/debugfs/inode.c
61670+++ b/fs/debugfs/inode.c
61671@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
61672 */
61673 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
61674 {
61675+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61676+ return __create_file(name, S_IFDIR | S_IRWXU,
61677+#else
61678 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
61679+#endif
61680 parent, NULL, NULL);
61681 }
61682 EXPORT_SYMBOL_GPL(debugfs_create_dir);
61683diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
61684index d4a9431..77f9b2e 100644
61685--- a/fs/ecryptfs/inode.c
61686+++ b/fs/ecryptfs/inode.c
61687@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
61688 old_fs = get_fs();
61689 set_fs(get_ds());
61690 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
61691- (char __user *)lower_buf,
61692+ (char __force_user *)lower_buf,
61693 PATH_MAX);
61694 set_fs(old_fs);
61695 if (rc < 0)
61696diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
61697index e4141f2..d8263e8 100644
61698--- a/fs/ecryptfs/miscdev.c
61699+++ b/fs/ecryptfs/miscdev.c
61700@@ -304,7 +304,7 @@ check_list:
61701 goto out_unlock_msg_ctx;
61702 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
61703 if (msg_ctx->msg) {
61704- if (copy_to_user(&buf[i], packet_length, packet_length_size))
61705+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
61706 goto out_unlock_msg_ctx;
61707 i += packet_length_size;
61708 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
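
The ecryptfs fix validates packet_length_size against the real size of the on-stack packet_length buffer before copy_to_user() reads from it, closing a kernel stack over-read. The general clamp-before-copy shape, with illustrative names:

/* Validate a computed length against the source buffer's true
 * capacity (and the destination's) before any copy runs. */
#include <stddef.h>
#include <string.h>
#include <errno.h>

static int copy_packet_header(char *dst, size_t dstsize,
			      const char hdr[], size_t hdr_capacity,
			      size_t hdr_len)
{
	/* hdr_len is computed elsewhere; never trust it to fit hdr[] */
	if (hdr_len > hdr_capacity || hdr_len > dstsize)
		return -EINVAL;
	memcpy(dst, hdr, hdr_len);	/* now bounded by both buffers */
	return 0;
}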
61709diff --git a/fs/exec.c b/fs/exec.c
61710index a3d33fe..49e9bc9 100644
61711--- a/fs/exec.c
61712+++ b/fs/exec.c
61713@@ -56,8 +56,20 @@
61714 #include <linux/pipe_fs_i.h>
61715 #include <linux/oom.h>
61716 #include <linux/compat.h>
61717+#include <linux/random.h>
61718+#include <linux/seq_file.h>
61719+#include <linux/coredump.h>
61720+#include <linux/mman.h>
61721+
61722+#ifdef CONFIG_PAX_REFCOUNT
61723+#include <linux/kallsyms.h>
61724+#include <linux/kdebug.h>
61725+#endif
61726+
61727+#include <trace/events/fs.h>
61728
61729 #include <asm/uaccess.h>
61730+#include <asm/sections.h>
61731 #include <asm/mmu_context.h>
61732 #include <asm/tlb.h>
61733
61734@@ -66,19 +78,34 @@
61735
61736 #include <trace/events/sched.h>
61737
61738+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61739+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
61740+{
61741+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61742+}
61743+#endif
61744+
61745+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61746+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61747+EXPORT_SYMBOL(pax_set_initial_flags_func);
61748+#endif
61749+
61750 int suid_dumpable = 0;
61751
61752 static LIST_HEAD(formats);
61753 static DEFINE_RWLOCK(binfmt_lock);
61754
61755+extern int gr_process_kernel_exec_ban(void);
61756+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61757+
61758 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61759 {
61760 BUG_ON(!fmt);
61761 if (WARN_ON(!fmt->load_binary))
61762 return;
61763 write_lock(&binfmt_lock);
61764- insert ? list_add(&fmt->lh, &formats) :
61765- list_add_tail(&fmt->lh, &formats);
61766+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61767+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61768 write_unlock(&binfmt_lock);
61769 }
61770
61771@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61772 void unregister_binfmt(struct linux_binfmt * fmt)
61773 {
61774 write_lock(&binfmt_lock);
61775- list_del(&fmt->lh);
61776+ pax_list_del((struct list_head *)&fmt->lh);
61777 write_unlock(&binfmt_lock);
61778 }
61779
61780@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61781 int write)
61782 {
61783 struct page *page;
61784- int ret;
61785
61786-#ifdef CONFIG_STACK_GROWSUP
61787- if (write) {
61788- ret = expand_downwards(bprm->vma, pos);
61789- if (ret < 0)
61790- return NULL;
61791- }
61792-#endif
61793- ret = get_user_pages(current, bprm->mm, pos,
61794- 1, write, 1, &page, NULL);
61795- if (ret <= 0)
61796+ if (0 > expand_downwards(bprm->vma, pos))
61797+ return NULL;
61798+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61799 return NULL;
61800
61801 if (write) {
61802@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61803 if (size <= ARG_MAX)
61804 return page;
61805
61806+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61807+ // only allow 512KB for argv+env on suid/sgid binaries
61808+ // to prevent easy ASLR exhaustion
61809+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61810+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61811+ (size > (512 * 1024))) {
61812+ put_page(page);
61813+ return NULL;
61814+ }
61815+#endif
61816+
61817 /*
61818 * Limit to 1/4-th the stack size for the argv+env strings.
61819 * This ensures that:
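
The added GRKERNSEC_PROC_MEMMAP check compares the post-exec credentials in bprm->cred against the current ones to detect a suid/sgid exec and caps its argv+env area at 512 KB, so a huge pile of argument strings cannot eat the randomized stack gap. A stand-alone sketch of just that decision (hypothetical helper, not the kernel code):

#include <stdbool.h>
#include <sys/types.h>

#define SUID_ARG_MAX	(512 * 1024)

static bool suid_args_too_large(uid_t new_euid, uid_t cur_euid,
				gid_t new_egid, gid_t cur_egid,
				unsigned long args_size)
{
	/* euid/egid changing across the exec marks a suid/sgid binary */
	bool priv_changing = (new_euid != cur_euid) || (new_egid != cur_egid);

	return priv_changing && args_size > SUID_ARG_MAX;
}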
61820@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61821 vma->vm_end = STACK_TOP_MAX;
61822 vma->vm_start = vma->vm_end - PAGE_SIZE;
61823 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61824+
61825+#ifdef CONFIG_PAX_SEGMEXEC
61826+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61827+#endif
61828+
61829 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61830 INIT_LIST_HEAD(&vma->anon_vma_chain);
61831
61832@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61833 mm->stack_vm = mm->total_vm = 1;
61834 up_write(&mm->mmap_sem);
61835 bprm->p = vma->vm_end - sizeof(void *);
61836+
61837+#ifdef CONFIG_PAX_RANDUSTACK
61838+ if (randomize_va_space)
61839+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61840+#endif
61841+
61842 return 0;
61843 err:
61844 up_write(&mm->mmap_sem);
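
The RANDUSTACK hunk XORs bprm->p with random bits masked by ~PAGE_MASK, so the initial stack pointer moves by a random sub-page offset while the chosen stack page itself is unchanged. Illustrated below with a 4 KB page assumed:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long p = 0x7ffffffff000UL - sizeof(void *);
	unsigned long rnd = (unsigned long)rand();

	p ^= rnd & ~PAGE_MASK;	/* perturb low bits only: same page */
	printf("randomized p: %#lx\n", p);
	return 0;
}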
61845@@ -399,7 +440,7 @@ struct user_arg_ptr {
61846 } ptr;
61847 };
61848
61849-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61850+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61851 {
61852 const char __user *native;
61853
61854@@ -408,14 +449,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61855 compat_uptr_t compat;
61856
61857 if (get_user(compat, argv.ptr.compat + nr))
61858- return ERR_PTR(-EFAULT);
61859+ return (const char __force_user *)ERR_PTR(-EFAULT);
61860
61861 return compat_ptr(compat);
61862 }
61863 #endif
61864
61865 if (get_user(native, argv.ptr.native + nr))
61866- return ERR_PTR(-EFAULT);
61867+ return (const char __force_user *)ERR_PTR(-EFAULT);
61868
61869 return native;
61870 }
61871@@ -434,7 +475,7 @@ static int count(struct user_arg_ptr argv, int max)
61872 if (!p)
61873 break;
61874
61875- if (IS_ERR(p))
61876+ if (IS_ERR((const char __force_kernel *)p))
61877 return -EFAULT;
61878
61879 if (i >= max)
61880@@ -469,7 +510,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
61881
61882 ret = -EFAULT;
61883 str = get_user_arg_ptr(argv, argc);
61884- if (IS_ERR(str))
61885+ if (IS_ERR((const char __force_kernel *)str))
61886 goto out;
61887
61888 len = strnlen_user(str, MAX_ARG_STRLEN);
61889@@ -551,7 +592,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
61890 int r;
61891 mm_segment_t oldfs = get_fs();
61892 struct user_arg_ptr argv = {
61893- .ptr.native = (const char __user *const __user *)__argv,
61894+ .ptr.native = (const char __user * const __force_user *)__argv,
61895 };
61896
61897 set_fs(KERNEL_DS);
61898@@ -586,7 +627,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61899 unsigned long new_end = old_end - shift;
61900 struct mmu_gather tlb;
61901
61902- BUG_ON(new_start > new_end);
61903+ if (new_start >= new_end || new_start < mmap_min_addr)
61904+ return -ENOMEM;
61905
61906 /*
61907 * ensure there are no vmas between where we want to go
61908@@ -595,6 +637,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61909 if (vma != find_vma(mm, new_start))
61910 return -EFAULT;
61911
61912+#ifdef CONFIG_PAX_SEGMEXEC
61913+ BUG_ON(pax_find_mirror_vma(vma));
61914+#endif
61915+
61916 /*
61917 * cover the whole range: [new_start, old_end)
61918 */
61919@@ -675,10 +721,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61920 stack_top = arch_align_stack(stack_top);
61921 stack_top = PAGE_ALIGN(stack_top);
61922
61923- if (unlikely(stack_top < mmap_min_addr) ||
61924- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
61925- return -ENOMEM;
61926-
61927 stack_shift = vma->vm_end - stack_top;
61928
61929 bprm->p -= stack_shift;
61930@@ -690,8 +732,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
61931 bprm->exec -= stack_shift;
61932
61933 down_write(&mm->mmap_sem);
61934+
61935+ /* Move stack pages down in memory. */
61936+ if (stack_shift) {
61937+ ret = shift_arg_pages(vma, stack_shift);
61938+ if (ret)
61939+ goto out_unlock;
61940+ }
61941+
61942 vm_flags = VM_STACK_FLAGS;
61943
61944+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61945+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
61946+ vm_flags &= ~VM_EXEC;
61947+
61948+#ifdef CONFIG_PAX_MPROTECT
61949+ if (mm->pax_flags & MF_PAX_MPROTECT)
61950+ vm_flags &= ~VM_MAYEXEC;
61951+#endif
61952+
61953+ }
61954+#endif
61955+
61956 /*
61957 * Adjust stack execute permissions; explicitly enable for
61958 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
61959@@ -710,13 +772,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61960 goto out_unlock;
61961 BUG_ON(prev != vma);
61962
61963- /* Move stack pages down in memory. */
61964- if (stack_shift) {
61965- ret = shift_arg_pages(vma, stack_shift);
61966- if (ret)
61967- goto out_unlock;
61968- }
61969-
61970 /* mprotect_fixup is overkill to remove the temporary stack flags */
61971 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
61972
61973@@ -740,6 +795,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
61974 #endif
61975 current->mm->start_stack = bprm->p;
61976 ret = expand_stack(vma, stack_base);
61977+
61978+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
61979+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
61980+ unsigned long size;
61981+ vm_flags_t vm_flags;
61982+
61983+ size = STACK_TOP - vma->vm_end;
61984+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
61985+
61986+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
61987+
61988+#ifdef CONFIG_X86
61989+ if (!ret) {
61990+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
61991+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
61992+ }
61993+#endif
61994+
61995+ }
61996+#endif
61997+
61998 if (ret)
61999 ret = -EFAULT;
62000
62001@@ -775,6 +851,8 @@ static struct file *do_open_exec(struct filename *name)
62002
62003 fsnotify_open(file);
62004
62005+ trace_open_exec(name->name);
62006+
62007 err = deny_write_access(file);
62008 if (err)
62009 goto exit;
62010@@ -804,7 +882,7 @@ int kernel_read(struct file *file, loff_t offset,
62011 old_fs = get_fs();
62012 set_fs(get_ds());
62013 /* The cast to a user pointer is valid due to the set_fs() */
62014- result = vfs_read(file, (void __user *)addr, count, &pos);
62015+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
62016 set_fs(old_fs);
62017 return result;
62018 }
62019@@ -849,6 +927,7 @@ static int exec_mmap(struct mm_struct *mm)
62020 tsk->mm = mm;
62021 tsk->active_mm = mm;
62022 activate_mm(active_mm, mm);
62023+ populate_stack();
62024 tsk->mm->vmacache_seqnum = 0;
62025 vmacache_flush(tsk);
62026 task_unlock(tsk);
62027@@ -1247,7 +1326,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
62028 }
62029 rcu_read_unlock();
62030
62031- if (p->fs->users > n_fs)
62032+ if (atomic_read(&p->fs->users) > n_fs)
62033 bprm->unsafe |= LSM_UNSAFE_SHARE;
62034 else
62035 p->fs->in_exec = 1;
62036@@ -1423,6 +1502,31 @@ static int exec_binprm(struct linux_binprm *bprm)
62037 return ret;
62038 }
62039
62040+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62041+static DEFINE_PER_CPU(u64, exec_counter);
62042+static int __init init_exec_counters(void)
62043+{
62044+ unsigned int cpu;
62045+
62046+ for_each_possible_cpu(cpu) {
62047+ per_cpu(exec_counter, cpu) = (u64)cpu;
62048+ }
62049+
62050+ return 0;
62051+}
62052+early_initcall(init_exec_counters);
62053+static inline void increment_exec_counter(void)
62054+{
62055+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
62056+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
62057+}
62058+#else
62059+static inline void increment_exec_counter(void) {}
62060+#endif
62061+
62062+extern void gr_handle_exec_args(struct linux_binprm *bprm,
62063+ struct user_arg_ptr argv);
62064+
62065 /*
62066 * sys_execve() executes a new program.
62067 */
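
The per-cpu exec counter above stays globally unique without locks: each CPU's counter starts at its own id and is bumped by 2^16, so the low 16 bits always name the CPU and values from different CPUs can never collide, which is why the BUILD_BUG_ON rejects NR_CPUS > 65536. A small check of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* counter(cpu) = cpu + n * 2^16 after n execs on that cpu */
static uint64_t exec_id(unsigned int cpu, uint64_t nr_execs)
{
	return (uint64_t)cpu + (nr_execs << 16);
}

int main(void)
{
	assert((exec_id(3, 1000) & 0xffff) == 3);	/* low bits = cpu */
	assert(exec_id(3, 7) != exec_id(4, 7));		/* no collisions */
	assert(exec_id(3, 7) != exec_id(4, 6));
	return 0;
}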
62068@@ -1430,6 +1534,11 @@ static int do_execve_common(struct filename *filename,
62069 struct user_arg_ptr argv,
62070 struct user_arg_ptr envp)
62071 {
62072+#ifdef CONFIG_GRKERNSEC
62073+ struct file *old_exec_file;
62074+ struct acl_subject_label *old_acl;
62075+ struct rlimit old_rlim[RLIM_NLIMITS];
62076+#endif
62077 struct linux_binprm *bprm;
62078 struct file *file;
62079 struct files_struct *displaced;
62080@@ -1438,6 +1547,8 @@ static int do_execve_common(struct filename *filename,
62081 if (IS_ERR(filename))
62082 return PTR_ERR(filename);
62083
62084+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
62085+
62086 /*
62087 * We move the actual failure in case of RLIMIT_NPROC excess from
62088 * set*uid() to execve() because too many poorly written programs
62089@@ -1475,11 +1586,21 @@ static int do_execve_common(struct filename *filename,
62090 if (IS_ERR(file))
62091 goto out_unmark;
62092
62093+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
62094+ retval = -EPERM;
62095+ goto out_unmark;
62096+ }
62097+
62098 sched_exec();
62099
62100 bprm->file = file;
62101 bprm->filename = bprm->interp = filename->name;
62102
62103+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
62104+ retval = -EACCES;
62105+ goto out_unmark;
62106+ }
62107+
62108 retval = bprm_mm_init(bprm);
62109 if (retval)
62110 goto out_unmark;
62111@@ -1496,24 +1617,70 @@ static int do_execve_common(struct filename *filename,
62112 if (retval < 0)
62113 goto out;
62114
62115+#ifdef CONFIG_GRKERNSEC
62116+ old_acl = current->acl;
62117+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
62118+ old_exec_file = current->exec_file;
62119+ get_file(file);
62120+ current->exec_file = file;
62121+#endif
62122+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62123+ /* limit suid stack to 8MB
62124+ * we saved the old limits above and will restore them if this exec fails
62125+ */
62126+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
62127+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
62128+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
62129+#endif
62130+
62131+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
62132+ retval = -EPERM;
62133+ goto out_fail;
62134+ }
62135+
62136+ if (!gr_tpe_allow(file)) {
62137+ retval = -EACCES;
62138+ goto out_fail;
62139+ }
62140+
62141+ if (gr_check_crash_exec(file)) {
62142+ retval = -EACCES;
62143+ goto out_fail;
62144+ }
62145+
62146+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
62147+ bprm->unsafe);
62148+ if (retval < 0)
62149+ goto out_fail;
62150+
62151 retval = copy_strings_kernel(1, &bprm->filename, bprm);
62152 if (retval < 0)
62153- goto out;
62154+ goto out_fail;
62155
62156 bprm->exec = bprm->p;
62157 retval = copy_strings(bprm->envc, envp, bprm);
62158 if (retval < 0)
62159- goto out;
62160+ goto out_fail;
62161
62162 retval = copy_strings(bprm->argc, argv, bprm);
62163 if (retval < 0)
62164- goto out;
62165+ goto out_fail;
62166+
62167+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
62168+
62169+ gr_handle_exec_args(bprm, argv);
62170
62171 retval = exec_binprm(bprm);
62172 if (retval < 0)
62173- goto out;
62174+ goto out_fail;
62175+#ifdef CONFIG_GRKERNSEC
62176+ if (old_exec_file)
62177+ fput(old_exec_file);
62178+#endif
62179
62180 /* execve succeeded */
62181+
62182+ increment_exec_counter();
62183 current->fs->in_exec = 0;
62184 current->in_execve = 0;
62185 acct_update_integrals(current);
62186@@ -1524,6 +1691,14 @@ static int do_execve_common(struct filename *filename,
62187 put_files_struct(displaced);
62188 return retval;
62189
62190+out_fail:
62191+#ifdef CONFIG_GRKERNSEC
62192+ current->acl = old_acl;
62193+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
62194+ fput(current->exec_file);
62195+ current->exec_file = old_exec_file;
62196+#endif
62197+
62198 out:
62199 if (bprm->mm) {
62200 acct_arg_size(bprm, 0);
62201@@ -1615,3 +1790,312 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
62202 return compat_do_execve(getname(filename), argv, envp);
62203 }
62204 #endif
62205+
62206+int pax_check_flags(unsigned long *flags)
62207+{
62208+ int retval = 0;
62209+
62210+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
62211+ if (*flags & MF_PAX_SEGMEXEC)
62212+ {
62213+ *flags &= ~MF_PAX_SEGMEXEC;
62214+ retval = -EINVAL;
62215+ }
62216+#endif
62217+
62218+ if ((*flags & MF_PAX_PAGEEXEC)
62219+
62220+#ifdef CONFIG_PAX_PAGEEXEC
62221+ && (*flags & MF_PAX_SEGMEXEC)
62222+#endif
62223+
62224+ )
62225+ {
62226+ *flags &= ~MF_PAX_PAGEEXEC;
62227+ retval = -EINVAL;
62228+ }
62229+
62230+ if ((*flags & MF_PAX_MPROTECT)
62231+
62232+#ifdef CONFIG_PAX_MPROTECT
62233+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62234+#endif
62235+
62236+ )
62237+ {
62238+ *flags &= ~MF_PAX_MPROTECT;
62239+ retval = -EINVAL;
62240+ }
62241+
62242+ if ((*flags & MF_PAX_EMUTRAMP)
62243+
62244+#ifdef CONFIG_PAX_EMUTRAMP
62245+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62246+#endif
62247+
62248+ )
62249+ {
62250+ *flags &= ~MF_PAX_EMUTRAMP;
62251+ retval = -EINVAL;
62252+ }
62253+
62254+ return retval;
62255+}
62256+
62257+EXPORT_SYMBOL(pax_check_flags);
62258+
62259+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62260+char *pax_get_path(const struct path *path, char *buf, int buflen)
62261+{
62262+ char *pathname = d_path(path, buf, buflen);
62263+
62264+ if (IS_ERR(pathname))
62265+ goto toolong;
62266+
62267+ pathname = mangle_path(buf, pathname, "\t\n\\");
62268+ if (!pathname)
62269+ goto toolong;
62270+
62271+ *pathname = 0;
62272+ return buf;
62273+
62274+toolong:
62275+ return "<path too long>";
62276+}
62277+EXPORT_SYMBOL(pax_get_path);
62278+
62279+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
62280+{
62281+ struct task_struct *tsk = current;
62282+ struct mm_struct *mm = current->mm;
62283+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
62284+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
62285+ char *path_exec = NULL;
62286+ char *path_fault = NULL;
62287+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
62288+ siginfo_t info = { };
62289+
62290+ if (buffer_exec && buffer_fault) {
62291+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
62292+
62293+ down_read(&mm->mmap_sem);
62294+ vma = mm->mmap;
62295+ while (vma && (!vma_exec || !vma_fault)) {
62296+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
62297+ vma_exec = vma;
62298+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
62299+ vma_fault = vma;
62300+ vma = vma->vm_next;
62301+ }
62302+ if (vma_exec)
62303+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
62304+ if (vma_fault) {
62305+ start = vma_fault->vm_start;
62306+ end = vma_fault->vm_end;
62307+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
62308+ if (vma_fault->vm_file)
62309+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
62310+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
62311+ path_fault = "<heap>";
62312+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
62313+ path_fault = "<stack>";
62314+ else
62315+ path_fault = "<anonymous mapping>";
62316+ }
62317+ up_read(&mm->mmap_sem);
62318+ }
62319+ if (tsk->signal->curr_ip)
62320+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
62321+ else
62322+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
62323+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
62324+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
62325+ free_page((unsigned long)buffer_exec);
62326+ free_page((unsigned long)buffer_fault);
62327+ pax_report_insns(regs, pc, sp);
62328+ info.si_signo = SIGKILL;
62329+ info.si_errno = 0;
62330+ info.si_code = SI_KERNEL;
62331+ info.si_pid = 0;
62332+ info.si_uid = 0;
62333+ do_coredump(&info);
62334+}
62335+#endif
62336+
62337+#ifdef CONFIG_PAX_REFCOUNT
62338+void pax_report_refcount_overflow(struct pt_regs *regs)
62339+{
62340+ if (current->signal->curr_ip)
62341+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
62342+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
62343+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62344+ else
62345+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
62346+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62347+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
62348+ preempt_disable();
62349+ show_regs(regs);
62350+ preempt_enable();
62351+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
62352+}
62353+#endif
62354+
62355+#ifdef CONFIG_PAX_USERCOPY
62356+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
62357+static noinline int check_stack_object(const void *obj, unsigned long len)
62358+{
62359+ const void * const stack = task_stack_page(current);
62360+ const void * const stackend = stack + THREAD_SIZE;
62361+
62362+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62363+ const void *frame = NULL;
62364+ const void *oldframe;
62365+#endif
62366+
62367+ if (obj + len < obj)
62368+ return -1;
62369+
62370+ if (obj + len <= stack || stackend <= obj)
62371+ return 0;
62372+
62373+ if (obj < stack || stackend < obj + len)
62374+ return -1;
62375+
62376+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62377+ oldframe = __builtin_frame_address(1);
62378+ if (oldframe)
62379+ frame = __builtin_frame_address(2);
62380+ /*
62381+ low ----------------------------------------------> high
62382+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
62383+ ^----------------^
62384+ allow copies only within here
62385+ */
62386+ while (stack <= frame && frame < stackend) {
62387+ /* if obj + len extends past the last frame, this
62388+ check won't pass and the next frame will be 0,
62389+ causing us to bail out and correctly report
62390+ the copy as invalid
62391+ */
62392+ if (obj + len <= frame)
62393+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
62394+ oldframe = frame;
62395+ frame = *(const void * const *)frame;
62396+ }
62397+ return -1;
62398+#else
62399+ return 1;
62400+#endif
62401+}
62402+
62403+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
62404+{
62405+ if (current->signal->curr_ip)
62406+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62407+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62408+ else
62409+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62410+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62411+ dump_stack();
62412+ gr_handle_kernel_exploit();
62413+ do_group_exit(SIGKILL);
62414+}
62415+#endif
62416+
62417+#ifdef CONFIG_PAX_USERCOPY
62418+
62419+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
62420+{
62421+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62422+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
62423+#ifdef CONFIG_MODULES
62424+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
62425+#else
62426+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
62427+#endif
62428+
62429+#else
62430+ unsigned long textlow = (unsigned long)_stext;
62431+ unsigned long texthigh = (unsigned long)_etext;
62432+
62433+#ifdef CONFIG_X86_64
62434+ /* check against linear mapping as well */
62435+ if (high > (unsigned long)__va(__pa(textlow)) &&
62436+ low < (unsigned long)__va(__pa(texthigh)))
62437+ return true;
62438+#endif
62439+
62440+#endif
62441+
62442+ if (high <= textlow || low >= texthigh)
62443+ return false;
62444+ else
62445+ return true;
62446+}
62447+#endif
62448+
62449+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
62450+{
62451+#ifdef CONFIG_PAX_USERCOPY
62452+ const char *type;
62453+#endif
62454+
62455+#ifndef CONFIG_STACK_GROWSUP
62456+ unsigned long stackstart = (unsigned long)task_stack_page(current);
62457+ unsigned long currentsp = (unsigned long)&stackstart;
62458+ if (unlikely((currentsp < stackstart + 512 ||
62459+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
62460+ BUG();
62461+#endif
62462+
62463+#ifndef CONFIG_PAX_USERCOPY_DEBUG
62464+ if (const_size)
62465+ return;
62466+#endif
62467+
62468+#ifdef CONFIG_PAX_USERCOPY
62469+ if (!n)
62470+ return;
62471+
62472+ type = check_heap_object(ptr, n);
62473+ if (!type) {
62474+ int ret = check_stack_object(ptr, n);
62475+ if (ret == 1 || ret == 2)
62476+ return;
62477+ if (ret == 0) {
62478+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
62479+ type = "<kernel text>";
62480+ else
62481+ return;
62482+ } else
62483+ type = "<process stack>";
62484+ }
62485+
62486+ pax_report_usercopy(ptr, n, to_user, type);
62487+#endif
62488+
62489+}
62490+EXPORT_SYMBOL(__check_object_size);
62491+
62492+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62493+void pax_track_stack(void)
62494+{
62495+ unsigned long sp = (unsigned long)&sp;
62496+ if (sp < current_thread_info()->lowest_stack &&
62497+ sp > (unsigned long)task_stack_page(current))
62498+ current_thread_info()->lowest_stack = sp;
62499+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
62500+ BUG();
62501+}
62502+EXPORT_SYMBOL(pax_track_stack);
62503+#endif
62504+
62505+#ifdef CONFIG_PAX_SIZE_OVERFLOW
62506+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
62507+{
62508+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
62509+ dump_stack();
62510+ do_group_exit(SIGKILL);
62511+}
62512+EXPORT_SYMBOL(report_size_overflow);
62513+#endif
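
check_stack_object() in the block above first classifies the copy range against the current task's stack before any frame walking. A reduced model of that three-way classification (the x86 frame-pointer walk is omitted; this is just the range logic):

#include <assert.h>
#include <stdint.h>

/* 0: not a stack object, 1: fully inside, -1: partial overlap */
static int classify_range(uintptr_t obj, uintptr_t len,
			  uintptr_t stack, uintptr_t stackend)
{
	if (obj + len < obj)			/* wraparound */
		return -1;
	if (obj + len <= stack || stackend <= obj)
		return 0;			/* entirely elsewhere */
	if (obj < stack || stackend < obj + len)
		return -1;			/* straddles: reject */
	return 1;				/* fully inside the stack */
}

int main(void)
{
	uintptr_t stack = 0x1000, stackend = 0x3000;

	assert(classify_range(0x0500, 0x100, stack, stackend) == 0);
	assert(classify_range(0x1100, 0x100, stack, stackend) == 1);
	assert(classify_range(0x0f80, 0x100, stack, stackend) == -1);
	return 0;
}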
62514diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
62515index 9f9992b..8b59411 100644
62516--- a/fs/ext2/balloc.c
62517+++ b/fs/ext2/balloc.c
62518@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
62519
62520 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62521 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62522- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62523+ if (free_blocks < root_blocks + 1 &&
62524 !uid_eq(sbi->s_resuid, current_fsuid()) &&
62525 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62526- !in_group_p (sbi->s_resgid))) {
62527+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62528 return 0;
62529 }
62530 return 1;
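
The ext2 reservation change does two things: it swaps capable() for capable_nolog() and moves the test to the end of the && chain, so the side-effectful capability check (audit logging, PF_SUPERPRIV) only runs once the cheaper identity tests have already failed. Short-circuit evaluation is what makes the ordering matter:

#include <stdbool.h>
#include <stdio.h>

static int capability_checks;

static bool expensive_capability_check(void)
{
	capability_checks++;	/* stands in for audit/PF_SUPERPRIV cost */
	return true;
}

static bool may_use_reserve(bool is_resuid, bool in_resgid)
{
	/* cheap identity tests first, privileged fallback last */
	return is_resuid || in_resgid || expensive_capability_check();
}

int main(void)
{
	may_use_reserve(true, false);	/* short-circuits: no check */
	may_use_reserve(false, false);	/* falls through to the check */
	printf("capability checks run: %d\n", capability_checks);	/* 1 */
	return 0;
}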
62531diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
62532index 9142614..97484fa 100644
62533--- a/fs/ext2/xattr.c
62534+++ b/fs/ext2/xattr.c
62535@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
62536 struct buffer_head *bh = NULL;
62537 struct ext2_xattr_entry *entry;
62538 char *end;
62539- size_t rest = buffer_size;
62540+ size_t rest = buffer_size, total_size = 0;
62541 int error;
62542
62543 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
62544@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
62545 buffer += size;
62546 }
62547 rest -= size;
62548+ total_size += size;
62549 }
62550 }
62551- error = buffer_size - rest; /* total size */
62552+ error = total_size;
62553
62554 cleanup:
62555 brelse(bh);
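
The xattr listing fix, repeated below for ext3 and ext4, replaces buffer_size - rest with an explicitly accumulated total_size. For a size-only query (NULL buffer, buffer_size == 0) the running rest counter dips below zero and wraps; modular arithmetic happens to make the final subtraction come out right, but that intermediate wrap is exactly what the size_overflow instrumentation elsewhere in this patch is built to flag, so the bookkeeping keeps every intermediate value in range. Demonstrated stand-alone:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t buffer_size = 0;		/* size-only query */
	size_t rest = buffer_size, total_size = 0;
	size_t sizes[] = { 16, 24 };	/* two xattr entries */

	for (int i = 0; i < 2; i++) {
		rest -= sizes[i];	/* wraps: huge intermediate value */
		total_size += sizes[i];
	}
	printf("wrapped rest:  %zu\n", rest);
	printf("old-style sum: %zu\n", buffer_size - rest);	/* 40 */
	printf("total_size:    %zu\n", total_size);		/* 40 */
	return 0;
}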
62556diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
62557index 158b5d4..2432610 100644
62558--- a/fs/ext3/balloc.c
62559+++ b/fs/ext3/balloc.c
62560@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
62561
62562 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62563 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62564- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62565+ if (free_blocks < root_blocks + 1 &&
62566 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
62567 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62568- !in_group_p (sbi->s_resgid))) {
62569+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62570 return 0;
62571 }
62572 return 1;
62573diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
62574index c6874be..f8a6ae8 100644
62575--- a/fs/ext3/xattr.c
62576+++ b/fs/ext3/xattr.c
62577@@ -330,7 +330,7 @@ static int
62578 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62579 char *buffer, size_t buffer_size)
62580 {
62581- size_t rest = buffer_size;
62582+ size_t rest = buffer_size, total_size = 0;
62583
62584 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
62585 const struct xattr_handler *handler =
62586@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62587 buffer += size;
62588 }
62589 rest -= size;
62590+ total_size += size;
62591 }
62592 }
62593- return buffer_size - rest;
62594+ return total_size;
62595 }
62596
62597 static int
62598diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
62599index fca3820..e1ea241 100644
62600--- a/fs/ext4/balloc.c
62601+++ b/fs/ext4/balloc.c
62602@@ -553,8 +553,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
62603 /* Hm, nope. Are (enough) root reserved clusters available? */
62604 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
62605 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
62606- capable(CAP_SYS_RESOURCE) ||
62607- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
62608+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
62609+ capable_nolog(CAP_SYS_RESOURCE)) {
62610
62611 if (free_clusters >= (nclusters + dirty_clusters +
62612 resv_clusters))
62613diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
62614index 7cc5a0e..851f176 100644
62615--- a/fs/ext4/ext4.h
62616+++ b/fs/ext4/ext4.h
62617@@ -1276,19 +1276,19 @@ struct ext4_sb_info {
62618 unsigned long s_mb_last_start;
62619
62620 /* stats for buddy allocator */
62621- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
62622- atomic_t s_bal_success; /* we found long enough chunks */
62623- atomic_t s_bal_allocated; /* in blocks */
62624- atomic_t s_bal_ex_scanned; /* total extents scanned */
62625- atomic_t s_bal_goals; /* goal hits */
62626- atomic_t s_bal_breaks; /* too long searches */
62627- atomic_t s_bal_2orders; /* 2^order hits */
62628+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
62629+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
62630+ atomic_unchecked_t s_bal_allocated; /* in blocks */
62631+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
62632+ atomic_unchecked_t s_bal_goals; /* goal hits */
62633+ atomic_unchecked_t s_bal_breaks; /* too long searches */
62634+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
62635 spinlock_t s_bal_lock;
62636 unsigned long s_mb_buddies_generated;
62637 unsigned long long s_mb_generation_time;
62638- atomic_t s_mb_lost_chunks;
62639- atomic_t s_mb_preallocated;
62640- atomic_t s_mb_discarded;
62641+ atomic_unchecked_t s_mb_lost_chunks;
62642+ atomic_unchecked_t s_mb_preallocated;
62643+ atomic_unchecked_t s_mb_discarded;
62644 atomic_t s_lock_busy;
62645
62646 /* locality groups */
62647diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
62648index 2dcb936..f5625a0 100644
62649--- a/fs/ext4/mballoc.c
62650+++ b/fs/ext4/mballoc.c
62651@@ -1899,7 +1899,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
62652 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
62653
62654 if (EXT4_SB(sb)->s_mb_stats)
62655- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
62656+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
62657
62658 break;
62659 }
62660@@ -2209,7 +2209,7 @@ repeat:
62661 ac->ac_status = AC_STATUS_CONTINUE;
62662 ac->ac_flags |= EXT4_MB_HINT_FIRST;
62663 cr = 3;
62664- atomic_inc(&sbi->s_mb_lost_chunks);
62665+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
62666 goto repeat;
62667 }
62668 }
62669@@ -2715,25 +2715,25 @@ int ext4_mb_release(struct super_block *sb)
62670 if (sbi->s_mb_stats) {
62671 ext4_msg(sb, KERN_INFO,
62672 "mballoc: %u blocks %u reqs (%u success)",
62673- atomic_read(&sbi->s_bal_allocated),
62674- atomic_read(&sbi->s_bal_reqs),
62675- atomic_read(&sbi->s_bal_success));
62676+ atomic_read_unchecked(&sbi->s_bal_allocated),
62677+ atomic_read_unchecked(&sbi->s_bal_reqs),
62678+ atomic_read_unchecked(&sbi->s_bal_success));
62679 ext4_msg(sb, KERN_INFO,
62680 "mballoc: %u extents scanned, %u goal hits, "
62681 "%u 2^N hits, %u breaks, %u lost",
62682- atomic_read(&sbi->s_bal_ex_scanned),
62683- atomic_read(&sbi->s_bal_goals),
62684- atomic_read(&sbi->s_bal_2orders),
62685- atomic_read(&sbi->s_bal_breaks),
62686- atomic_read(&sbi->s_mb_lost_chunks));
62687+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
62688+ atomic_read_unchecked(&sbi->s_bal_goals),
62689+ atomic_read_unchecked(&sbi->s_bal_2orders),
62690+ atomic_read_unchecked(&sbi->s_bal_breaks),
62691+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
62692 ext4_msg(sb, KERN_INFO,
62693 "mballoc: %lu generated and it took %Lu",
62694 sbi->s_mb_buddies_generated,
62695 sbi->s_mb_generation_time);
62696 ext4_msg(sb, KERN_INFO,
62697 "mballoc: %u preallocated, %u discarded",
62698- atomic_read(&sbi->s_mb_preallocated),
62699- atomic_read(&sbi->s_mb_discarded));
62700+ atomic_read_unchecked(&sbi->s_mb_preallocated),
62701+ atomic_read_unchecked(&sbi->s_mb_discarded));
62702 }
62703
62704 free_percpu(sbi->s_locality_groups);
62705@@ -3189,16 +3189,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
62706 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
62707
62708 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
62709- atomic_inc(&sbi->s_bal_reqs);
62710- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62711+ atomic_inc_unchecked(&sbi->s_bal_reqs);
62712+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62713 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
62714- atomic_inc(&sbi->s_bal_success);
62715- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
62716+ atomic_inc_unchecked(&sbi->s_bal_success);
62717+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
62718 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
62719 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
62720- atomic_inc(&sbi->s_bal_goals);
62721+ atomic_inc_unchecked(&sbi->s_bal_goals);
62722 if (ac->ac_found > sbi->s_mb_max_to_scan)
62723- atomic_inc(&sbi->s_bal_breaks);
62724+ atomic_inc_unchecked(&sbi->s_bal_breaks);
62725 }
62726
62727 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
62728@@ -3603,7 +3603,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62729 trace_ext4_mb_new_inode_pa(ac, pa);
62730
62731 ext4_mb_use_inode_pa(ac, pa);
62732- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62733+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62734
62735 ei = EXT4_I(ac->ac_inode);
62736 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62737@@ -3663,7 +3663,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62738 trace_ext4_mb_new_group_pa(ac, pa);
62739
62740 ext4_mb_use_group_pa(ac, pa);
62741- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62742+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62743
62744 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62745 lg = ac->ac_lg;
62746@@ -3752,7 +3752,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62747 * from the bitmap and continue.
62748 */
62749 }
62750- atomic_add(free, &sbi->s_mb_discarded);
62751+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62752
62753 return err;
62754 }
62755@@ -3770,7 +3770,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62756 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62757 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62758 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62759- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62760+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62761 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62762
62763 return 0;
62764diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62765index 32bce84..112d969 100644
62766--- a/fs/ext4/mmp.c
62767+++ b/fs/ext4/mmp.c
62768@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
62769 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
62770 const char *function, unsigned int line, const char *msg)
62771 {
62772- __ext4_warning(sb, function, line, msg);
62773+ __ext4_warning(sb, function, line, "%s", msg);
62774 __ext4_warning(sb, function, line,
62775 "MMP failure info: last update time: %llu, last update "
62776 "node: %s, last update device: %s\n",
62777diff --git a/fs/ext4/super.c b/fs/ext4/super.c
62778index 6df7bc6..410a655 100644
62779--- a/fs/ext4/super.c
62780+++ b/fs/ext4/super.c
62781@@ -1276,7 +1276,7 @@ static ext4_fsblk_t get_sb_block(void **data)
62782 }
62783
62784 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
62785-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62786+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62787 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
62788
62789 #ifdef CONFIG_QUOTA
62790@@ -2464,7 +2464,7 @@ struct ext4_attr {
62791 int offset;
62792 int deprecated_val;
62793 } u;
62794-};
62795+} __do_const;
62796
62797 static int parse_strtoull(const char *buf,
62798 unsigned long long max, unsigned long long *value)
62799diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
62800index e738733..9843a6c 100644
62801--- a/fs/ext4/xattr.c
62802+++ b/fs/ext4/xattr.c
62803@@ -386,7 +386,7 @@ static int
62804 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62805 char *buffer, size_t buffer_size)
62806 {
62807- size_t rest = buffer_size;
62808+ size_t rest = buffer_size, total_size = 0;
62809
62810 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
62811 const struct xattr_handler *handler =
62812@@ -403,9 +403,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62813 buffer += size;
62814 }
62815 rest -= size;
62816+ total_size += size;
62817 }
62818 }
62819- return buffer_size - rest;
62820+ return total_size;
62821 }
62822
62823 static int
62824diff --git a/fs/fcntl.c b/fs/fcntl.c
62825index 72c82f6..a18b263 100644
62826--- a/fs/fcntl.c
62827+++ b/fs/fcntl.c
62828@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
62829 if (err)
62830 return err;
62831
62832+ if (gr_handle_chroot_fowner(pid, type))
62833+ return -ENOENT;
62834+ if (gr_check_protected_task_fowner(pid, type))
62835+ return -EACCES;
62836+
62837 f_modown(filp, pid, type, force);
62838 return 0;
62839 }
62840diff --git a/fs/fhandle.c b/fs/fhandle.c
62841index 999ff5c..ac037c9 100644
62842--- a/fs/fhandle.c
62843+++ b/fs/fhandle.c
62844@@ -8,6 +8,7 @@
62845 #include <linux/fs_struct.h>
62846 #include <linux/fsnotify.h>
62847 #include <linux/personality.h>
62848+#include <linux/grsecurity.h>
62849 #include <asm/uaccess.h>
62850 #include "internal.h"
62851 #include "mount.h"
62852@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
62853 } else
62854 retval = 0;
62855 /* copy the mount id */
62856- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
62857- sizeof(*mnt_id)) ||
62858+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
62859 copy_to_user(ufh, handle,
62860 sizeof(struct file_handle) + handle_bytes))
62861 retval = -EFAULT;
62862@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62863 * the directory. Ideally we would like CAP_DAC_SEARCH.
62864 * But we don't have that
62865 */
62866- if (!capable(CAP_DAC_READ_SEARCH)) {
62867+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
62868 retval = -EPERM;
62869 goto out_err;
62870 }
62871diff --git a/fs/file.c b/fs/file.c
62872index 66923fe..2849783 100644
62873--- a/fs/file.c
62874+++ b/fs/file.c
62875@@ -16,6 +16,7 @@
62876 #include <linux/slab.h>
62877 #include <linux/vmalloc.h>
62878 #include <linux/file.h>
62879+#include <linux/security.h>
62880 #include <linux/fdtable.h>
62881 #include <linux/bitops.h>
62882 #include <linux/interrupt.h>
62883@@ -139,7 +140,7 @@ out:
62884 * Return <0 error code on error; 1 on successful completion.
62885 * The files->file_lock should be held on entry, and will be held on exit.
62886 */
62887-static int expand_fdtable(struct files_struct *files, int nr)
62888+static int expand_fdtable(struct files_struct *files, unsigned int nr)
62889 __releases(files->file_lock)
62890 __acquires(files->file_lock)
62891 {
62892@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
62893 * expanded and execution may have blocked.
62894 * The files->file_lock should be held on entry, and will be held on exit.
62895 */
62896-static int expand_files(struct files_struct *files, int nr)
62897+static int expand_files(struct files_struct *files, unsigned int nr)
62898 {
62899 struct fdtable *fdt;
62900
62901@@ -799,6 +800,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
62902 if (!file)
62903 return __close_fd(files, fd);
62904
62905+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
62906 if (fd >= rlimit(RLIMIT_NOFILE))
62907 return -EBADF;
62908
62909@@ -825,6 +827,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
62910 if (unlikely(oldfd == newfd))
62911 return -EINVAL;
62912
62913+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
62914 if (newfd >= rlimit(RLIMIT_NOFILE))
62915 return -EBADF;
62916
62917@@ -880,6 +883,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62918 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62919 {
62920 int err;
62921+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62922 if (from >= rlimit(RLIMIT_NOFILE))
62923 return -EINVAL;
62924 err = alloc_fd(from, flags);
62925diff --git a/fs/filesystems.c b/fs/filesystems.c
62926index 5797d45..7d7d79a 100644
62927--- a/fs/filesystems.c
62928+++ b/fs/filesystems.c
62929@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62930 int len = dot ? dot - name : strlen(name);
62931
62932 fs = __get_fs_type(name, len);
62933+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62934+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62935+#else
62936 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62937+#endif
62938 fs = __get_fs_type(name, len);
62939
62940 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62941diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62942index 7dca743..543d620 100644
62943--- a/fs/fs_struct.c
62944+++ b/fs/fs_struct.c
62945@@ -4,6 +4,7 @@
62946 #include <linux/path.h>
62947 #include <linux/slab.h>
62948 #include <linux/fs_struct.h>
62949+#include <linux/grsecurity.h>
62950 #include "internal.h"
62951
62952 /*
62953@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62954 write_seqcount_begin(&fs->seq);
62955 old_root = fs->root;
62956 fs->root = *path;
62957+ gr_set_chroot_entries(current, path);
62958 write_seqcount_end(&fs->seq);
62959 spin_unlock(&fs->lock);
62960 if (old_root.dentry)
62961@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62962 int hits = 0;
62963 spin_lock(&fs->lock);
62964 write_seqcount_begin(&fs->seq);
62965+ /* this root replacement is only done by pivot_root,
62966+ leave grsec's chroot tagging alone for this task
62967+ so that a pivoted root isn't treated as a chroot
62968+ */
62969 hits += replace_path(&fs->root, old_root, new_root);
62970 hits += replace_path(&fs->pwd, old_root, new_root);
62971 write_seqcount_end(&fs->seq);
62972@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
62973 task_lock(tsk);
62974 spin_lock(&fs->lock);
62975 tsk->fs = NULL;
62976- kill = !--fs->users;
62977+ gr_clear_chroot_entries(tsk);
62978+ kill = !atomic_dec_return(&fs->users);
62979 spin_unlock(&fs->lock);
62980 task_unlock(tsk);
62981 if (kill)
62982@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62983 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62984 /* We don't need to lock fs - think why ;-) */
62985 if (fs) {
62986- fs->users = 1;
62987+ atomic_set(&fs->users, 1);
62988 fs->in_exec = 0;
62989 spin_lock_init(&fs->lock);
62990 seqcount_init(&fs->seq);
62991@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62992 spin_lock(&old->lock);
62993 fs->root = old->root;
62994 path_get(&fs->root);
62995+ /* instead of calling gr_set_chroot_entries here,
62996+ we call it from every caller of this function
62997+ */
62998 fs->pwd = old->pwd;
62999 path_get(&fs->pwd);
63000 spin_unlock(&old->lock);
63001@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
63002
63003 task_lock(current);
63004 spin_lock(&fs->lock);
63005- kill = !--fs->users;
63006+ kill = !atomic_dec_return(&fs->users);
63007 current->fs = new_fs;
63008+ gr_set_chroot_entries(current, &new_fs->root);
63009 spin_unlock(&fs->lock);
63010 task_unlock(current);
63011
63012@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
63013
63014 int current_umask(void)
63015 {
63016- return current->fs->umask;
63017+ return current->fs->umask | gr_acl_umask();
63018 }
63019 EXPORT_SYMBOL(current_umask);
63020
63021 /* to be mentioned only in INIT_TASK */
63022 struct fs_struct init_fs = {
63023- .users = 1,
63024+ .users = ATOMIC_INIT(1),
63025 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
63026 .seq = SEQCNT_ZERO(init_fs.seq),
63027 .umask = 0022,
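
fs_struct's users count becomes an atomic_t above, with the kill decision taken from atomic_dec_return(): exactly one dropper observes the count reach zero and owns the teardown, so the spinlock no longer has to serialize the decrement itself. The same release idiom in a stand-alone sketch, with GCC builtins standing in for the kernel atomics:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int users;	/* manipulated only via the atomics below */
};

static bool obj_put(struct obj *o)
{
	/* true exactly once: for the caller dropping the last ref */
	return __atomic_sub_fetch(&o->users, 1, __ATOMIC_ACQ_REL) == 0;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->users = 2;
	printf("kill? %d\n", obj_put(o));	/* 0: others remain */
	if (obj_put(o)) {			/* 1: last reference */
		free(o);
		puts("freed");
	}
	return 0;
}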
63028diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
63029index aec01be..cf81ff9 100644
63030--- a/fs/fscache/cookie.c
63031+++ b/fs/fscache/cookie.c
63032@@ -19,7 +19,7 @@
63033
63034 struct kmem_cache *fscache_cookie_jar;
63035
63036-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
63037+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
63038
63039 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
63040 static int fscache_alloc_object(struct fscache_cache *cache,
63041@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
63042 parent ? (char *) parent->def->name : "<no-parent>",
63043 def->name, netfs_data, enable);
63044
63045- fscache_stat(&fscache_n_acquires);
63046+ fscache_stat_unchecked(&fscache_n_acquires);
63047
63048 /* if there's no parent cookie, then we don't create one here either */
63049 if (!parent) {
63050- fscache_stat(&fscache_n_acquires_null);
63051+ fscache_stat_unchecked(&fscache_n_acquires_null);
63052 _leave(" [no parent]");
63053 return NULL;
63054 }
63055@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63056 /* allocate and initialise a cookie */
63057 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
63058 if (!cookie) {
63059- fscache_stat(&fscache_n_acquires_oom);
63060+ fscache_stat_unchecked(&fscache_n_acquires_oom);
63061 _leave(" [ENOMEM]");
63062 return NULL;
63063 }
63064@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
63065
63066 switch (cookie->def->type) {
63067 case FSCACHE_COOKIE_TYPE_INDEX:
63068- fscache_stat(&fscache_n_cookie_index);
63069+ fscache_stat_unchecked(&fscache_n_cookie_index);
63070 break;
63071 case FSCACHE_COOKIE_TYPE_DATAFILE:
63072- fscache_stat(&fscache_n_cookie_data);
63073+ fscache_stat_unchecked(&fscache_n_cookie_data);
63074 break;
63075 default:
63076- fscache_stat(&fscache_n_cookie_special);
63077+ fscache_stat_unchecked(&fscache_n_cookie_special);
63078 break;
63079 }
63080
63081@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63082 } else {
63083 atomic_dec(&parent->n_children);
63084 __fscache_cookie_put(cookie);
63085- fscache_stat(&fscache_n_acquires_nobufs);
63086+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
63087 _leave(" = NULL");
63088 return NULL;
63089 }
63090@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63091 }
63092 }
63093
63094- fscache_stat(&fscache_n_acquires_ok);
63095+ fscache_stat_unchecked(&fscache_n_acquires_ok);
63096 _leave(" = %p", cookie);
63097 return cookie;
63098 }
63099@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
63100 cache = fscache_select_cache_for_object(cookie->parent);
63101 if (!cache) {
63102 up_read(&fscache_addremove_sem);
63103- fscache_stat(&fscache_n_acquires_no_cache);
63104+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
63105 _leave(" = -ENOMEDIUM [no cache]");
63106 return -ENOMEDIUM;
63107 }
63108@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
63109 object = cache->ops->alloc_object(cache, cookie);
63110 fscache_stat_d(&fscache_n_cop_alloc_object);
63111 if (IS_ERR(object)) {
63112- fscache_stat(&fscache_n_object_no_alloc);
63113+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
63114 ret = PTR_ERR(object);
63115 goto error;
63116 }
63117
63118- fscache_stat(&fscache_n_object_alloc);
63119+ fscache_stat_unchecked(&fscache_n_object_alloc);
63120
63121- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
63122+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
63123
63124 _debug("ALLOC OBJ%x: %s {%lx}",
63125 object->debug_id, cookie->def->name, object->events);
63126@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
63127
63128 _enter("{%s}", cookie->def->name);
63129
63130- fscache_stat(&fscache_n_invalidates);
63131+ fscache_stat_unchecked(&fscache_n_invalidates);
63132
63133 /* Only permit invalidation of data files. Invalidating an index will
63134 * require the caller to release all its attachments to the tree rooted
63135@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
63136 {
63137 struct fscache_object *object;
63138
63139- fscache_stat(&fscache_n_updates);
63140+ fscache_stat_unchecked(&fscache_n_updates);
63141
63142 if (!cookie) {
63143- fscache_stat(&fscache_n_updates_null);
63144+ fscache_stat_unchecked(&fscache_n_updates_null);
63145 _leave(" [no cookie]");
63146 return;
63147 }
63148@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
63149 */
63150 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
63151 {
63152- fscache_stat(&fscache_n_relinquishes);
63153+ fscache_stat_unchecked(&fscache_n_relinquishes);
63154 if (retire)
63155- fscache_stat(&fscache_n_relinquishes_retire);
63156+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
63157
63158 if (!cookie) {
63159- fscache_stat(&fscache_n_relinquishes_null);
63160+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
63161 _leave(" [no cookie]");
63162 return;
63163 }
63164@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
63165 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
63166 goto inconsistent;
63167
63168- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63169+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63170
63171 __fscache_use_cookie(cookie);
63172 if (fscache_submit_op(object, op) < 0)
63173diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
63174index bc6c08f..09c0d96 100644
63175--- a/fs/fscache/internal.h
63176+++ b/fs/fscache/internal.h
63177@@ -139,8 +139,8 @@ extern void fscache_operation_gc(struct work_struct *);
63178 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
63179 extern int fscache_wait_for_operation_activation(struct fscache_object *,
63180 struct fscache_operation *,
63181- atomic_t *,
63182- atomic_t *,
63183+ atomic_unchecked_t *,
63184+ atomic_unchecked_t *,
63185 void (*)(struct fscache_operation *));
63186 extern void fscache_invalidate_writes(struct fscache_cookie *);
63187
63188@@ -159,101 +159,101 @@ extern void fscache_proc_cleanup(void);
63189 * stats.c
63190 */
63191 #ifdef CONFIG_FSCACHE_STATS
63192-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63193-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63194+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63195+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63196
63197-extern atomic_t fscache_n_op_pend;
63198-extern atomic_t fscache_n_op_run;
63199-extern atomic_t fscache_n_op_enqueue;
63200-extern atomic_t fscache_n_op_deferred_release;
63201-extern atomic_t fscache_n_op_release;
63202-extern atomic_t fscache_n_op_gc;
63203-extern atomic_t fscache_n_op_cancelled;
63204-extern atomic_t fscache_n_op_rejected;
63205+extern atomic_unchecked_t fscache_n_op_pend;
63206+extern atomic_unchecked_t fscache_n_op_run;
63207+extern atomic_unchecked_t fscache_n_op_enqueue;
63208+extern atomic_unchecked_t fscache_n_op_deferred_release;
63209+extern atomic_unchecked_t fscache_n_op_release;
63210+extern atomic_unchecked_t fscache_n_op_gc;
63211+extern atomic_unchecked_t fscache_n_op_cancelled;
63212+extern atomic_unchecked_t fscache_n_op_rejected;
63213
63214-extern atomic_t fscache_n_attr_changed;
63215-extern atomic_t fscache_n_attr_changed_ok;
63216-extern atomic_t fscache_n_attr_changed_nobufs;
63217-extern atomic_t fscache_n_attr_changed_nomem;
63218-extern atomic_t fscache_n_attr_changed_calls;
63219+extern atomic_unchecked_t fscache_n_attr_changed;
63220+extern atomic_unchecked_t fscache_n_attr_changed_ok;
63221+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
63222+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
63223+extern atomic_unchecked_t fscache_n_attr_changed_calls;
63224
63225-extern atomic_t fscache_n_allocs;
63226-extern atomic_t fscache_n_allocs_ok;
63227-extern atomic_t fscache_n_allocs_wait;
63228-extern atomic_t fscache_n_allocs_nobufs;
63229-extern atomic_t fscache_n_allocs_intr;
63230-extern atomic_t fscache_n_allocs_object_dead;
63231-extern atomic_t fscache_n_alloc_ops;
63232-extern atomic_t fscache_n_alloc_op_waits;
63233+extern atomic_unchecked_t fscache_n_allocs;
63234+extern atomic_unchecked_t fscache_n_allocs_ok;
63235+extern atomic_unchecked_t fscache_n_allocs_wait;
63236+extern atomic_unchecked_t fscache_n_allocs_nobufs;
63237+extern atomic_unchecked_t fscache_n_allocs_intr;
63238+extern atomic_unchecked_t fscache_n_allocs_object_dead;
63239+extern atomic_unchecked_t fscache_n_alloc_ops;
63240+extern atomic_unchecked_t fscache_n_alloc_op_waits;
63241
63242-extern atomic_t fscache_n_retrievals;
63243-extern atomic_t fscache_n_retrievals_ok;
63244-extern atomic_t fscache_n_retrievals_wait;
63245-extern atomic_t fscache_n_retrievals_nodata;
63246-extern atomic_t fscache_n_retrievals_nobufs;
63247-extern atomic_t fscache_n_retrievals_intr;
63248-extern atomic_t fscache_n_retrievals_nomem;
63249-extern atomic_t fscache_n_retrievals_object_dead;
63250-extern atomic_t fscache_n_retrieval_ops;
63251-extern atomic_t fscache_n_retrieval_op_waits;
63252+extern atomic_unchecked_t fscache_n_retrievals;
63253+extern atomic_unchecked_t fscache_n_retrievals_ok;
63254+extern atomic_unchecked_t fscache_n_retrievals_wait;
63255+extern atomic_unchecked_t fscache_n_retrievals_nodata;
63256+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
63257+extern atomic_unchecked_t fscache_n_retrievals_intr;
63258+extern atomic_unchecked_t fscache_n_retrievals_nomem;
63259+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
63260+extern atomic_unchecked_t fscache_n_retrieval_ops;
63261+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
63262
63263-extern atomic_t fscache_n_stores;
63264-extern atomic_t fscache_n_stores_ok;
63265-extern atomic_t fscache_n_stores_again;
63266-extern atomic_t fscache_n_stores_nobufs;
63267-extern atomic_t fscache_n_stores_oom;
63268-extern atomic_t fscache_n_store_ops;
63269-extern atomic_t fscache_n_store_calls;
63270-extern atomic_t fscache_n_store_pages;
63271-extern atomic_t fscache_n_store_radix_deletes;
63272-extern atomic_t fscache_n_store_pages_over_limit;
63273+extern atomic_unchecked_t fscache_n_stores;
63274+extern atomic_unchecked_t fscache_n_stores_ok;
63275+extern atomic_unchecked_t fscache_n_stores_again;
63276+extern atomic_unchecked_t fscache_n_stores_nobufs;
63277+extern atomic_unchecked_t fscache_n_stores_oom;
63278+extern atomic_unchecked_t fscache_n_store_ops;
63279+extern atomic_unchecked_t fscache_n_store_calls;
63280+extern atomic_unchecked_t fscache_n_store_pages;
63281+extern atomic_unchecked_t fscache_n_store_radix_deletes;
63282+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
63283
63284-extern atomic_t fscache_n_store_vmscan_not_storing;
63285-extern atomic_t fscache_n_store_vmscan_gone;
63286-extern atomic_t fscache_n_store_vmscan_busy;
63287-extern atomic_t fscache_n_store_vmscan_cancelled;
63288-extern atomic_t fscache_n_store_vmscan_wait;
63289+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63290+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
63291+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
63292+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63293+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
63294
63295-extern atomic_t fscache_n_marks;
63296-extern atomic_t fscache_n_uncaches;
63297+extern atomic_unchecked_t fscache_n_marks;
63298+extern atomic_unchecked_t fscache_n_uncaches;
63299
63300-extern atomic_t fscache_n_acquires;
63301-extern atomic_t fscache_n_acquires_null;
63302-extern atomic_t fscache_n_acquires_no_cache;
63303-extern atomic_t fscache_n_acquires_ok;
63304-extern atomic_t fscache_n_acquires_nobufs;
63305-extern atomic_t fscache_n_acquires_oom;
63306+extern atomic_unchecked_t fscache_n_acquires;
63307+extern atomic_unchecked_t fscache_n_acquires_null;
63308+extern atomic_unchecked_t fscache_n_acquires_no_cache;
63309+extern atomic_unchecked_t fscache_n_acquires_ok;
63310+extern atomic_unchecked_t fscache_n_acquires_nobufs;
63311+extern atomic_unchecked_t fscache_n_acquires_oom;
63312
63313-extern atomic_t fscache_n_invalidates;
63314-extern atomic_t fscache_n_invalidates_run;
63315+extern atomic_unchecked_t fscache_n_invalidates;
63316+extern atomic_unchecked_t fscache_n_invalidates_run;
63317
63318-extern atomic_t fscache_n_updates;
63319-extern atomic_t fscache_n_updates_null;
63320-extern atomic_t fscache_n_updates_run;
63321+extern atomic_unchecked_t fscache_n_updates;
63322+extern atomic_unchecked_t fscache_n_updates_null;
63323+extern atomic_unchecked_t fscache_n_updates_run;
63324
63325-extern atomic_t fscache_n_relinquishes;
63326-extern atomic_t fscache_n_relinquishes_null;
63327-extern atomic_t fscache_n_relinquishes_waitcrt;
63328-extern atomic_t fscache_n_relinquishes_retire;
63329+extern atomic_unchecked_t fscache_n_relinquishes;
63330+extern atomic_unchecked_t fscache_n_relinquishes_null;
63331+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63332+extern atomic_unchecked_t fscache_n_relinquishes_retire;
63333
63334-extern atomic_t fscache_n_cookie_index;
63335-extern atomic_t fscache_n_cookie_data;
63336-extern atomic_t fscache_n_cookie_special;
63337+extern atomic_unchecked_t fscache_n_cookie_index;
63338+extern atomic_unchecked_t fscache_n_cookie_data;
63339+extern atomic_unchecked_t fscache_n_cookie_special;
63340
63341-extern atomic_t fscache_n_object_alloc;
63342-extern atomic_t fscache_n_object_no_alloc;
63343-extern atomic_t fscache_n_object_lookups;
63344-extern atomic_t fscache_n_object_lookups_negative;
63345-extern atomic_t fscache_n_object_lookups_positive;
63346-extern atomic_t fscache_n_object_lookups_timed_out;
63347-extern atomic_t fscache_n_object_created;
63348-extern atomic_t fscache_n_object_avail;
63349-extern atomic_t fscache_n_object_dead;
63350+extern atomic_unchecked_t fscache_n_object_alloc;
63351+extern atomic_unchecked_t fscache_n_object_no_alloc;
63352+extern atomic_unchecked_t fscache_n_object_lookups;
63353+extern atomic_unchecked_t fscache_n_object_lookups_negative;
63354+extern atomic_unchecked_t fscache_n_object_lookups_positive;
63355+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
63356+extern atomic_unchecked_t fscache_n_object_created;
63357+extern atomic_unchecked_t fscache_n_object_avail;
63358+extern atomic_unchecked_t fscache_n_object_dead;
63359
63360-extern atomic_t fscache_n_checkaux_none;
63361-extern atomic_t fscache_n_checkaux_okay;
63362-extern atomic_t fscache_n_checkaux_update;
63363-extern atomic_t fscache_n_checkaux_obsolete;
63364+extern atomic_unchecked_t fscache_n_checkaux_none;
63365+extern atomic_unchecked_t fscache_n_checkaux_okay;
63366+extern atomic_unchecked_t fscache_n_checkaux_update;
63367+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
63368
63369 extern atomic_t fscache_n_cop_alloc_object;
63370 extern atomic_t fscache_n_cop_lookup_object;
63371@@ -278,6 +278,11 @@ static inline void fscache_stat(atomic_t *stat)
63372 atomic_inc(stat);
63373 }
63374
63375+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
63376+{
63377+ atomic_inc_unchecked(stat);
63378+}
63379+
63380 static inline void fscache_stat_d(atomic_t *stat)
63381 {
63382 atomic_dec(stat);
63383@@ -290,6 +295,7 @@ extern const struct file_operations fscache_stats_fops;
63384
63385 #define __fscache_stat(stat) (NULL)
63386 #define fscache_stat(stat) do {} while (0)
63387+#define fscache_stat_unchecked(stat) do {} while (0)
63388 #define fscache_stat_d(stat) do {} while (0)
63389 #endif
63390
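
The run of conversions above (and the matching ones in object.c, operation.c, page.c and stats.c below) is one mechanical substitution: fscache's statistics counters are only ever incremented and dumped to /proc/fs/fscache/stats, so wrapping them is harmless, and retyping them as atomic_unchecked_t exempts them from the PaX REFCOUNT overflow trap. Counters such as fscache_n_cop_* that are decremented again via fscache_stat_d() behave like reference counts and deliberately keep the checked atomic_t. A minimal userspace model of the split, assuming the PaX semantics (neither type's kernel definition appears in this excerpt):

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct { atomic_int counter; } atomic_model_t;           /* models atomic_t under REFCOUNT */
    typedef struct { atomic_int counter; } atomic_unchecked_model_t; /* models atomic_unchecked_t */

    static void atomic_inc_model(atomic_model_t *v)
    {
            /* REFCOUNT: report/trap on overflow instead of silently wrapping */
            if (atomic_fetch_add(&v->counter, 1) == INT_MAX)
                    abort();
    }

    static void atomic_inc_unchecked_model(atomic_unchecked_model_t *v)
    {
            /* plain event counter: free to wrap */
            atomic_fetch_add(&v->counter, 1);
    }
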
63391diff --git a/fs/fscache/object.c b/fs/fscache/object.c
63392index d3b4539..ed0c659 100644
63393--- a/fs/fscache/object.c
63394+++ b/fs/fscache/object.c
63395@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63396 _debug("LOOKUP \"%s\" in \"%s\"",
63397 cookie->def->name, object->cache->tag->name);
63398
63399- fscache_stat(&fscache_n_object_lookups);
63400+ fscache_stat_unchecked(&fscache_n_object_lookups);
63401 fscache_stat(&fscache_n_cop_lookup_object);
63402 ret = object->cache->ops->lookup_object(object);
63403 fscache_stat_d(&fscache_n_cop_lookup_object);
63404@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63405 if (ret == -ETIMEDOUT) {
63406 /* probably stuck behind another object, so move this one to
63407 * the back of the queue */
63408- fscache_stat(&fscache_n_object_lookups_timed_out);
63409+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
63410 _leave(" [timeout]");
63411 return NO_TRANSIT;
63412 }
63413@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
63414 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
63415
63416 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63417- fscache_stat(&fscache_n_object_lookups_negative);
63418+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
63419
63420 /* Allow write requests to begin stacking up and read requests to begin
63421 * returning ENODATA.
63422@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
63423 /* if we were still looking up, then we must have a positive lookup
63424 * result, in which case there may be data available */
63425 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63426- fscache_stat(&fscache_n_object_lookups_positive);
63427+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
63428
63429 /* We do (presumably) have data */
63430 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
63431@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
63432 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
63433 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
63434 } else {
63435- fscache_stat(&fscache_n_object_created);
63436+ fscache_stat_unchecked(&fscache_n_object_created);
63437 }
63438
63439 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
63440@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
63441 fscache_stat_d(&fscache_n_cop_lookup_complete);
63442
63443 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
63444- fscache_stat(&fscache_n_object_avail);
63445+ fscache_stat_unchecked(&fscache_n_object_avail);
63446
63447 _leave("");
63448 return transit_to(JUMPSTART_DEPS);
63449@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
63450
63451 /* this just shifts the object release to the work processor */
63452 fscache_put_object(object);
63453- fscache_stat(&fscache_n_object_dead);
63454+ fscache_stat_unchecked(&fscache_n_object_dead);
63455
63456 _leave("");
63457 return transit_to(OBJECT_DEAD);
63458@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63459 enum fscache_checkaux result;
63460
63461 if (!object->cookie->def->check_aux) {
63462- fscache_stat(&fscache_n_checkaux_none);
63463+ fscache_stat_unchecked(&fscache_n_checkaux_none);
63464 return FSCACHE_CHECKAUX_OKAY;
63465 }
63466
63467@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63468 switch (result) {
63469 /* entry okay as is */
63470 case FSCACHE_CHECKAUX_OKAY:
63471- fscache_stat(&fscache_n_checkaux_okay);
63472+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
63473 break;
63474
63475 /* entry requires update */
63476 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
63477- fscache_stat(&fscache_n_checkaux_update);
63478+ fscache_stat_unchecked(&fscache_n_checkaux_update);
63479 break;
63480
63481 /* entry requires deletion */
63482 case FSCACHE_CHECKAUX_OBSOLETE:
63483- fscache_stat(&fscache_n_checkaux_obsolete);
63484+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
63485 break;
63486
63487 default:
63488@@ -992,7 +992,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
63489 {
63490 const struct fscache_state *s;
63491
63492- fscache_stat(&fscache_n_invalidates_run);
63493+ fscache_stat_unchecked(&fscache_n_invalidates_run);
63494 fscache_stat(&fscache_n_cop_invalidate_object);
63495 s = _fscache_invalidate_object(object, event);
63496 fscache_stat_d(&fscache_n_cop_invalidate_object);
63497@@ -1007,7 +1007,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
63498 {
63499 _enter("{OBJ%x},%d", object->debug_id, event);
63500
63501- fscache_stat(&fscache_n_updates_run);
63502+ fscache_stat_unchecked(&fscache_n_updates_run);
63503 fscache_stat(&fscache_n_cop_update_object);
63504 object->cache->ops->update_object(object);
63505 fscache_stat_d(&fscache_n_cop_update_object);
63506diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
63507index e7b87a0..a85d47a 100644
63508--- a/fs/fscache/operation.c
63509+++ b/fs/fscache/operation.c
63510@@ -17,7 +17,7 @@
63511 #include <linux/slab.h>
63512 #include "internal.h"
63513
63514-atomic_t fscache_op_debug_id;
63515+atomic_unchecked_t fscache_op_debug_id;
63516 EXPORT_SYMBOL(fscache_op_debug_id);
63517
63518 /**
63519@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
63520 ASSERTCMP(atomic_read(&op->usage), >, 0);
63521 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
63522
63523- fscache_stat(&fscache_n_op_enqueue);
63524+ fscache_stat_unchecked(&fscache_n_op_enqueue);
63525 switch (op->flags & FSCACHE_OP_TYPE) {
63526 case FSCACHE_OP_ASYNC:
63527 _debug("queue async");
63528@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
63529 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
63530 if (op->processor)
63531 fscache_enqueue_operation(op);
63532- fscache_stat(&fscache_n_op_run);
63533+ fscache_stat_unchecked(&fscache_n_op_run);
63534 }
63535
63536 /*
63537@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63538 if (object->n_in_progress > 0) {
63539 atomic_inc(&op->usage);
63540 list_add_tail(&op->pend_link, &object->pending_ops);
63541- fscache_stat(&fscache_n_op_pend);
63542+ fscache_stat_unchecked(&fscache_n_op_pend);
63543 } else if (!list_empty(&object->pending_ops)) {
63544 atomic_inc(&op->usage);
63545 list_add_tail(&op->pend_link, &object->pending_ops);
63546- fscache_stat(&fscache_n_op_pend);
63547+ fscache_stat_unchecked(&fscache_n_op_pend);
63548 fscache_start_operations(object);
63549 } else {
63550 ASSERTCMP(object->n_in_progress, ==, 0);
63551@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63552 object->n_exclusive++; /* reads and writes must wait */
63553 atomic_inc(&op->usage);
63554 list_add_tail(&op->pend_link, &object->pending_ops);
63555- fscache_stat(&fscache_n_op_pend);
63556+ fscache_stat_unchecked(&fscache_n_op_pend);
63557 ret = 0;
63558 } else {
63559 /* If we're in any other state, there must have been an I/O
63560@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
63561 if (object->n_exclusive > 0) {
63562 atomic_inc(&op->usage);
63563 list_add_tail(&op->pend_link, &object->pending_ops);
63564- fscache_stat(&fscache_n_op_pend);
63565+ fscache_stat_unchecked(&fscache_n_op_pend);
63566 } else if (!list_empty(&object->pending_ops)) {
63567 atomic_inc(&op->usage);
63568 list_add_tail(&op->pend_link, &object->pending_ops);
63569- fscache_stat(&fscache_n_op_pend);
63570+ fscache_stat_unchecked(&fscache_n_op_pend);
63571 fscache_start_operations(object);
63572 } else {
63573 ASSERTCMP(object->n_exclusive, ==, 0);
63574@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
63575 object->n_ops++;
63576 atomic_inc(&op->usage);
63577 list_add_tail(&op->pend_link, &object->pending_ops);
63578- fscache_stat(&fscache_n_op_pend);
63579+ fscache_stat_unchecked(&fscache_n_op_pend);
63580 ret = 0;
63581 } else if (fscache_object_is_dying(object)) {
63582- fscache_stat(&fscache_n_op_rejected);
63583+ fscache_stat_unchecked(&fscache_n_op_rejected);
63584 op->state = FSCACHE_OP_ST_CANCELLED;
63585 ret = -ENOBUFS;
63586 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
63587@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
63588 ret = -EBUSY;
63589 if (op->state == FSCACHE_OP_ST_PENDING) {
63590 ASSERT(!list_empty(&op->pend_link));
63591- fscache_stat(&fscache_n_op_cancelled);
63592+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63593 list_del_init(&op->pend_link);
63594 if (do_cancel)
63595 do_cancel(op);
63596@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
63597 while (!list_empty(&object->pending_ops)) {
63598 op = list_entry(object->pending_ops.next,
63599 struct fscache_operation, pend_link);
63600- fscache_stat(&fscache_n_op_cancelled);
63601+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63602 list_del_init(&op->pend_link);
63603
63604 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
63605@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
63606 op->state, ==, FSCACHE_OP_ST_CANCELLED);
63607 op->state = FSCACHE_OP_ST_DEAD;
63608
63609- fscache_stat(&fscache_n_op_release);
63610+ fscache_stat_unchecked(&fscache_n_op_release);
63611
63612 if (op->release) {
63613 op->release(op);
63614@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
63615 * lock, and defer it otherwise */
63616 if (!spin_trylock(&object->lock)) {
63617 _debug("defer put");
63618- fscache_stat(&fscache_n_op_deferred_release);
63619+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
63620
63621 cache = object->cache;
63622 spin_lock(&cache->op_gc_list_lock);
63623@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
63624
63625 _debug("GC DEFERRED REL OBJ%x OP%x",
63626 object->debug_id, op->debug_id);
63627- fscache_stat(&fscache_n_op_gc);
63628+ fscache_stat_unchecked(&fscache_n_op_gc);
63629
63630 ASSERTCMP(atomic_read(&op->usage), ==, 0);
63631 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
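
fscache_op_debug_id is a pure ID generator: each new operation takes the next value purely for "OP%x" trace output, so a wrapped counter merely recycles labels. A sketch of the assumed semantics of atomic_inc_return_unchecked() (increment, return the new value, no overflow check):

    #include <stdatomic.h>

    static atomic_uint op_debug_id_model; /* stands in for fscache_op_debug_id */

    static unsigned int inc_return_unchecked_model(atomic_uint *v)
    {
            return atomic_fetch_add(v, 1u) + 1u; /* new value, wrap is fine */
    }
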
63632diff --git a/fs/fscache/page.c b/fs/fscache/page.c
63633index ed70714..67f4982 100644
63634--- a/fs/fscache/page.c
63635+++ b/fs/fscache/page.c
63636@@ -61,7 +61,7 @@ try_again:
63637 val = radix_tree_lookup(&cookie->stores, page->index);
63638 if (!val) {
63639 rcu_read_unlock();
63640- fscache_stat(&fscache_n_store_vmscan_not_storing);
63641+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
63642 __fscache_uncache_page(cookie, page);
63643 return true;
63644 }
63645@@ -91,11 +91,11 @@ try_again:
63646 spin_unlock(&cookie->stores_lock);
63647
63648 if (xpage) {
63649- fscache_stat(&fscache_n_store_vmscan_cancelled);
63650- fscache_stat(&fscache_n_store_radix_deletes);
63651+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
63652+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63653 ASSERTCMP(xpage, ==, page);
63654 } else {
63655- fscache_stat(&fscache_n_store_vmscan_gone);
63656+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
63657 }
63658
63659 wake_up_bit(&cookie->flags, 0);
63660@@ -110,11 +110,11 @@ page_busy:
63661 * sleeping on memory allocation, so we may need to impose a timeout
63662 * too. */
63663 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63664- fscache_stat(&fscache_n_store_vmscan_busy);
63665+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63666 return false;
63667 }
63668
63669- fscache_stat(&fscache_n_store_vmscan_wait);
63670+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63671 __fscache_wait_on_page_write(cookie, page);
63672 gfp &= ~__GFP_WAIT;
63673 goto try_again;
63674@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63675 FSCACHE_COOKIE_STORING_TAG);
63676 if (!radix_tree_tag_get(&cookie->stores, page->index,
63677 FSCACHE_COOKIE_PENDING_TAG)) {
63678- fscache_stat(&fscache_n_store_radix_deletes);
63679+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63680 xpage = radix_tree_delete(&cookie->stores, page->index);
63681 }
63682 spin_unlock(&cookie->stores_lock);
63683@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63684
63685 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63686
63687- fscache_stat(&fscache_n_attr_changed_calls);
63688+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
63689
63690 if (fscache_object_is_active(object)) {
63691 fscache_stat(&fscache_n_cop_attr_changed);
63692@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63693
63694 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63695
63696- fscache_stat(&fscache_n_attr_changed);
63697+ fscache_stat_unchecked(&fscache_n_attr_changed);
63698
63699 op = kzalloc(sizeof(*op), GFP_KERNEL);
63700 if (!op) {
63701- fscache_stat(&fscache_n_attr_changed_nomem);
63702+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
63703 _leave(" = -ENOMEM");
63704 return -ENOMEM;
63705 }
63706@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63707 if (fscache_submit_exclusive_op(object, op) < 0)
63708 goto nobufs;
63709 spin_unlock(&cookie->lock);
63710- fscache_stat(&fscache_n_attr_changed_ok);
63711+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
63712 fscache_put_operation(op);
63713 _leave(" = 0");
63714 return 0;
63715@@ -225,7 +225,7 @@ nobufs:
63716 kfree(op);
63717 if (wake_cookie)
63718 __fscache_wake_unused_cookie(cookie);
63719- fscache_stat(&fscache_n_attr_changed_nobufs);
63720+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
63721 _leave(" = %d", -ENOBUFS);
63722 return -ENOBUFS;
63723 }
63724@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
63725 /* allocate a retrieval operation and attempt to submit it */
63726 op = kzalloc(sizeof(*op), GFP_NOIO);
63727 if (!op) {
63728- fscache_stat(&fscache_n_retrievals_nomem);
63729+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63730 return NULL;
63731 }
63732
63733@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
63734 return 0;
63735 }
63736
63737- fscache_stat(&fscache_n_retrievals_wait);
63738+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
63739
63740 jif = jiffies;
63741 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
63742 fscache_wait_bit_interruptible,
63743 TASK_INTERRUPTIBLE) != 0) {
63744- fscache_stat(&fscache_n_retrievals_intr);
63745+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63746 _leave(" = -ERESTARTSYS");
63747 return -ERESTARTSYS;
63748 }
63749@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
63750 */
63751 int fscache_wait_for_operation_activation(struct fscache_object *object,
63752 struct fscache_operation *op,
63753- atomic_t *stat_op_waits,
63754- atomic_t *stat_object_dead,
63755+ atomic_unchecked_t *stat_op_waits,
63756+ atomic_unchecked_t *stat_object_dead,
63757 void (*do_cancel)(struct fscache_operation *))
63758 {
63759 int ret;
63760@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63761
63762 _debug(">>> WT");
63763 if (stat_op_waits)
63764- fscache_stat(stat_op_waits);
63765+ fscache_stat_unchecked(stat_op_waits);
63766 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
63767 fscache_wait_bit_interruptible,
63768 TASK_INTERRUPTIBLE) != 0) {
63769@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63770 check_if_dead:
63771 if (op->state == FSCACHE_OP_ST_CANCELLED) {
63772 if (stat_object_dead)
63773- fscache_stat(stat_object_dead);
63774+ fscache_stat_unchecked(stat_object_dead);
63775 _leave(" = -ENOBUFS [cancelled]");
63776 return -ENOBUFS;
63777 }
63778@@ -366,7 +366,7 @@ check_if_dead:
63779 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
63780 fscache_cancel_op(op, do_cancel);
63781 if (stat_object_dead)
63782- fscache_stat(stat_object_dead);
63783+ fscache_stat_unchecked(stat_object_dead);
63784 return -ENOBUFS;
63785 }
63786 return 0;
63787@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63788
63789 _enter("%p,%p,,,", cookie, page);
63790
63791- fscache_stat(&fscache_n_retrievals);
63792+ fscache_stat_unchecked(&fscache_n_retrievals);
63793
63794 if (hlist_empty(&cookie->backing_objects))
63795 goto nobufs;
63796@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63797 goto nobufs_unlock_dec;
63798 spin_unlock(&cookie->lock);
63799
63800- fscache_stat(&fscache_n_retrieval_ops);
63801+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63802
63803 /* pin the netfs read context in case we need to do the actual netfs
63804 * read because we've encountered a cache read failure */
63805@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63806
63807 error:
63808 if (ret == -ENOMEM)
63809- fscache_stat(&fscache_n_retrievals_nomem);
63810+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63811 else if (ret == -ERESTARTSYS)
63812- fscache_stat(&fscache_n_retrievals_intr);
63813+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63814 else if (ret == -ENODATA)
63815- fscache_stat(&fscache_n_retrievals_nodata);
63816+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63817 else if (ret < 0)
63818- fscache_stat(&fscache_n_retrievals_nobufs);
63819+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63820 else
63821- fscache_stat(&fscache_n_retrievals_ok);
63822+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63823
63824 fscache_put_retrieval(op);
63825 _leave(" = %d", ret);
63826@@ -490,7 +490,7 @@ nobufs_unlock:
63827 __fscache_wake_unused_cookie(cookie);
63828 kfree(op);
63829 nobufs:
63830- fscache_stat(&fscache_n_retrievals_nobufs);
63831+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63832 _leave(" = -ENOBUFS");
63833 return -ENOBUFS;
63834 }
63835@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63836
63837 _enter("%p,,%d,,,", cookie, *nr_pages);
63838
63839- fscache_stat(&fscache_n_retrievals);
63840+ fscache_stat_unchecked(&fscache_n_retrievals);
63841
63842 if (hlist_empty(&cookie->backing_objects))
63843 goto nobufs;
63844@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63845 goto nobufs_unlock_dec;
63846 spin_unlock(&cookie->lock);
63847
63848- fscache_stat(&fscache_n_retrieval_ops);
63849+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63850
63851 /* pin the netfs read context in case we need to do the actual netfs
63852 * read because we've encountered a cache read failure */
63853@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63854
63855 error:
63856 if (ret == -ENOMEM)
63857- fscache_stat(&fscache_n_retrievals_nomem);
63858+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63859 else if (ret == -ERESTARTSYS)
63860- fscache_stat(&fscache_n_retrievals_intr);
63861+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63862 else if (ret == -ENODATA)
63863- fscache_stat(&fscache_n_retrievals_nodata);
63864+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63865 else if (ret < 0)
63866- fscache_stat(&fscache_n_retrievals_nobufs);
63867+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63868 else
63869- fscache_stat(&fscache_n_retrievals_ok);
63870+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63871
63872 fscache_put_retrieval(op);
63873 _leave(" = %d", ret);
63874@@ -621,7 +621,7 @@ nobufs_unlock:
63875 if (wake_cookie)
63876 __fscache_wake_unused_cookie(cookie);
63877 nobufs:
63878- fscache_stat(&fscache_n_retrievals_nobufs);
63879+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63880 _leave(" = -ENOBUFS");
63881 return -ENOBUFS;
63882 }
63883@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63884
63885 _enter("%p,%p,,,", cookie, page);
63886
63887- fscache_stat(&fscache_n_allocs);
63888+ fscache_stat_unchecked(&fscache_n_allocs);
63889
63890 if (hlist_empty(&cookie->backing_objects))
63891 goto nobufs;
63892@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63893 goto nobufs_unlock_dec;
63894 spin_unlock(&cookie->lock);
63895
63896- fscache_stat(&fscache_n_alloc_ops);
63897+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63898
63899 ret = fscache_wait_for_operation_activation(
63900 object, &op->op,
63901@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63902
63903 error:
63904 if (ret == -ERESTARTSYS)
63905- fscache_stat(&fscache_n_allocs_intr);
63906+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63907 else if (ret < 0)
63908- fscache_stat(&fscache_n_allocs_nobufs);
63909+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63910 else
63911- fscache_stat(&fscache_n_allocs_ok);
63912+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63913
63914 fscache_put_retrieval(op);
63915 _leave(" = %d", ret);
63916@@ -715,7 +715,7 @@ nobufs_unlock:
63917 if (wake_cookie)
63918 __fscache_wake_unused_cookie(cookie);
63919 nobufs:
63920- fscache_stat(&fscache_n_allocs_nobufs);
63921+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63922 _leave(" = -ENOBUFS");
63923 return -ENOBUFS;
63924 }
63925@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63926
63927 spin_lock(&cookie->stores_lock);
63928
63929- fscache_stat(&fscache_n_store_calls);
63930+ fscache_stat_unchecked(&fscache_n_store_calls);
63931
63932 /* find a page to store */
63933 page = NULL;
63934@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63935 page = results[0];
63936 _debug("gang %d [%lx]", n, page->index);
63937 if (page->index > op->store_limit) {
63938- fscache_stat(&fscache_n_store_pages_over_limit);
63939+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63940 goto superseded;
63941 }
63942
63943@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63944 spin_unlock(&cookie->stores_lock);
63945 spin_unlock(&object->lock);
63946
63947- fscache_stat(&fscache_n_store_pages);
63948+ fscache_stat_unchecked(&fscache_n_store_pages);
63949 fscache_stat(&fscache_n_cop_write_page);
63950 ret = object->cache->ops->write_page(op, page);
63951 fscache_stat_d(&fscache_n_cop_write_page);
63952@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63953 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63954 ASSERT(PageFsCache(page));
63955
63956- fscache_stat(&fscache_n_stores);
63957+ fscache_stat_unchecked(&fscache_n_stores);
63958
63959 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63960 _leave(" = -ENOBUFS [invalidating]");
63961@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63962 spin_unlock(&cookie->stores_lock);
63963 spin_unlock(&object->lock);
63964
63965- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63966+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63967 op->store_limit = object->store_limit;
63968
63969 __fscache_use_cookie(cookie);
63970@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63971
63972 spin_unlock(&cookie->lock);
63973 radix_tree_preload_end();
63974- fscache_stat(&fscache_n_store_ops);
63975- fscache_stat(&fscache_n_stores_ok);
63976+ fscache_stat_unchecked(&fscache_n_store_ops);
63977+ fscache_stat_unchecked(&fscache_n_stores_ok);
63978
63979 /* the work queue now carries its own ref on the object */
63980 fscache_put_operation(&op->op);
63981@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63982 return 0;
63983
63984 already_queued:
63985- fscache_stat(&fscache_n_stores_again);
63986+ fscache_stat_unchecked(&fscache_n_stores_again);
63987 already_pending:
63988 spin_unlock(&cookie->stores_lock);
63989 spin_unlock(&object->lock);
63990 spin_unlock(&cookie->lock);
63991 radix_tree_preload_end();
63992 kfree(op);
63993- fscache_stat(&fscache_n_stores_ok);
63994+ fscache_stat_unchecked(&fscache_n_stores_ok);
63995 _leave(" = 0");
63996 return 0;
63997
63998@@ -1024,14 +1024,14 @@ nobufs:
63999 kfree(op);
64000 if (wake_cookie)
64001 __fscache_wake_unused_cookie(cookie);
64002- fscache_stat(&fscache_n_stores_nobufs);
64003+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
64004 _leave(" = -ENOBUFS");
64005 return -ENOBUFS;
64006
64007 nomem_free:
64008 kfree(op);
64009 nomem:
64010- fscache_stat(&fscache_n_stores_oom);
64011+ fscache_stat_unchecked(&fscache_n_stores_oom);
64012 _leave(" = -ENOMEM");
64013 return -ENOMEM;
64014 }
64015@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
64016 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64017 ASSERTCMP(page, !=, NULL);
64018
64019- fscache_stat(&fscache_n_uncaches);
64020+ fscache_stat_unchecked(&fscache_n_uncaches);
64021
64022 /* cache withdrawal may beat us to it */
64023 if (!PageFsCache(page))
64024@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
64025 struct fscache_cookie *cookie = op->op.object->cookie;
64026
64027 #ifdef CONFIG_FSCACHE_STATS
64028- atomic_inc(&fscache_n_marks);
64029+ atomic_inc_unchecked(&fscache_n_marks);
64030 #endif
64031
64032 _debug("- mark %p{%lx}", page, page->index);
64033diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
64034index 40d13c7..ddf52b9 100644
64035--- a/fs/fscache/stats.c
64036+++ b/fs/fscache/stats.c
64037@@ -18,99 +18,99 @@
64038 /*
64039 * operation counters
64040 */
64041-atomic_t fscache_n_op_pend;
64042-atomic_t fscache_n_op_run;
64043-atomic_t fscache_n_op_enqueue;
64044-atomic_t fscache_n_op_requeue;
64045-atomic_t fscache_n_op_deferred_release;
64046-atomic_t fscache_n_op_release;
64047-atomic_t fscache_n_op_gc;
64048-atomic_t fscache_n_op_cancelled;
64049-atomic_t fscache_n_op_rejected;
64050+atomic_unchecked_t fscache_n_op_pend;
64051+atomic_unchecked_t fscache_n_op_run;
64052+atomic_unchecked_t fscache_n_op_enqueue;
64053+atomic_unchecked_t fscache_n_op_requeue;
64054+atomic_unchecked_t fscache_n_op_deferred_release;
64055+atomic_unchecked_t fscache_n_op_release;
64056+atomic_unchecked_t fscache_n_op_gc;
64057+atomic_unchecked_t fscache_n_op_cancelled;
64058+atomic_unchecked_t fscache_n_op_rejected;
64059
64060-atomic_t fscache_n_attr_changed;
64061-atomic_t fscache_n_attr_changed_ok;
64062-atomic_t fscache_n_attr_changed_nobufs;
64063-atomic_t fscache_n_attr_changed_nomem;
64064-atomic_t fscache_n_attr_changed_calls;
64065+atomic_unchecked_t fscache_n_attr_changed;
64066+atomic_unchecked_t fscache_n_attr_changed_ok;
64067+atomic_unchecked_t fscache_n_attr_changed_nobufs;
64068+atomic_unchecked_t fscache_n_attr_changed_nomem;
64069+atomic_unchecked_t fscache_n_attr_changed_calls;
64070
64071-atomic_t fscache_n_allocs;
64072-atomic_t fscache_n_allocs_ok;
64073-atomic_t fscache_n_allocs_wait;
64074-atomic_t fscache_n_allocs_nobufs;
64075-atomic_t fscache_n_allocs_intr;
64076-atomic_t fscache_n_allocs_object_dead;
64077-atomic_t fscache_n_alloc_ops;
64078-atomic_t fscache_n_alloc_op_waits;
64079+atomic_unchecked_t fscache_n_allocs;
64080+atomic_unchecked_t fscache_n_allocs_ok;
64081+atomic_unchecked_t fscache_n_allocs_wait;
64082+atomic_unchecked_t fscache_n_allocs_nobufs;
64083+atomic_unchecked_t fscache_n_allocs_intr;
64084+atomic_unchecked_t fscache_n_allocs_object_dead;
64085+atomic_unchecked_t fscache_n_alloc_ops;
64086+atomic_unchecked_t fscache_n_alloc_op_waits;
64087
64088-atomic_t fscache_n_retrievals;
64089-atomic_t fscache_n_retrievals_ok;
64090-atomic_t fscache_n_retrievals_wait;
64091-atomic_t fscache_n_retrievals_nodata;
64092-atomic_t fscache_n_retrievals_nobufs;
64093-atomic_t fscache_n_retrievals_intr;
64094-atomic_t fscache_n_retrievals_nomem;
64095-atomic_t fscache_n_retrievals_object_dead;
64096-atomic_t fscache_n_retrieval_ops;
64097-atomic_t fscache_n_retrieval_op_waits;
64098+atomic_unchecked_t fscache_n_retrievals;
64099+atomic_unchecked_t fscache_n_retrievals_ok;
64100+atomic_unchecked_t fscache_n_retrievals_wait;
64101+atomic_unchecked_t fscache_n_retrievals_nodata;
64102+atomic_unchecked_t fscache_n_retrievals_nobufs;
64103+atomic_unchecked_t fscache_n_retrievals_intr;
64104+atomic_unchecked_t fscache_n_retrievals_nomem;
64105+atomic_unchecked_t fscache_n_retrievals_object_dead;
64106+atomic_unchecked_t fscache_n_retrieval_ops;
64107+atomic_unchecked_t fscache_n_retrieval_op_waits;
64108
64109-atomic_t fscache_n_stores;
64110-atomic_t fscache_n_stores_ok;
64111-atomic_t fscache_n_stores_again;
64112-atomic_t fscache_n_stores_nobufs;
64113-atomic_t fscache_n_stores_oom;
64114-atomic_t fscache_n_store_ops;
64115-atomic_t fscache_n_store_calls;
64116-atomic_t fscache_n_store_pages;
64117-atomic_t fscache_n_store_radix_deletes;
64118-atomic_t fscache_n_store_pages_over_limit;
64119+atomic_unchecked_t fscache_n_stores;
64120+atomic_unchecked_t fscache_n_stores_ok;
64121+atomic_unchecked_t fscache_n_stores_again;
64122+atomic_unchecked_t fscache_n_stores_nobufs;
64123+atomic_unchecked_t fscache_n_stores_oom;
64124+atomic_unchecked_t fscache_n_store_ops;
64125+atomic_unchecked_t fscache_n_store_calls;
64126+atomic_unchecked_t fscache_n_store_pages;
64127+atomic_unchecked_t fscache_n_store_radix_deletes;
64128+atomic_unchecked_t fscache_n_store_pages_over_limit;
64129
64130-atomic_t fscache_n_store_vmscan_not_storing;
64131-atomic_t fscache_n_store_vmscan_gone;
64132-atomic_t fscache_n_store_vmscan_busy;
64133-atomic_t fscache_n_store_vmscan_cancelled;
64134-atomic_t fscache_n_store_vmscan_wait;
64135+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64136+atomic_unchecked_t fscache_n_store_vmscan_gone;
64137+atomic_unchecked_t fscache_n_store_vmscan_busy;
64138+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64139+atomic_unchecked_t fscache_n_store_vmscan_wait;
64140
64141-atomic_t fscache_n_marks;
64142-atomic_t fscache_n_uncaches;
64143+atomic_unchecked_t fscache_n_marks;
64144+atomic_unchecked_t fscache_n_uncaches;
64145
64146-atomic_t fscache_n_acquires;
64147-atomic_t fscache_n_acquires_null;
64148-atomic_t fscache_n_acquires_no_cache;
64149-atomic_t fscache_n_acquires_ok;
64150-atomic_t fscache_n_acquires_nobufs;
64151-atomic_t fscache_n_acquires_oom;
64152+atomic_unchecked_t fscache_n_acquires;
64153+atomic_unchecked_t fscache_n_acquires_null;
64154+atomic_unchecked_t fscache_n_acquires_no_cache;
64155+atomic_unchecked_t fscache_n_acquires_ok;
64156+atomic_unchecked_t fscache_n_acquires_nobufs;
64157+atomic_unchecked_t fscache_n_acquires_oom;
64158
64159-atomic_t fscache_n_invalidates;
64160-atomic_t fscache_n_invalidates_run;
64161+atomic_unchecked_t fscache_n_invalidates;
64162+atomic_unchecked_t fscache_n_invalidates_run;
64163
64164-atomic_t fscache_n_updates;
64165-atomic_t fscache_n_updates_null;
64166-atomic_t fscache_n_updates_run;
64167+atomic_unchecked_t fscache_n_updates;
64168+atomic_unchecked_t fscache_n_updates_null;
64169+atomic_unchecked_t fscache_n_updates_run;
64170
64171-atomic_t fscache_n_relinquishes;
64172-atomic_t fscache_n_relinquishes_null;
64173-atomic_t fscache_n_relinquishes_waitcrt;
64174-atomic_t fscache_n_relinquishes_retire;
64175+atomic_unchecked_t fscache_n_relinquishes;
64176+atomic_unchecked_t fscache_n_relinquishes_null;
64177+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64178+atomic_unchecked_t fscache_n_relinquishes_retire;
64179
64180-atomic_t fscache_n_cookie_index;
64181-atomic_t fscache_n_cookie_data;
64182-atomic_t fscache_n_cookie_special;
64183+atomic_unchecked_t fscache_n_cookie_index;
64184+atomic_unchecked_t fscache_n_cookie_data;
64185+atomic_unchecked_t fscache_n_cookie_special;
64186
64187-atomic_t fscache_n_object_alloc;
64188-atomic_t fscache_n_object_no_alloc;
64189-atomic_t fscache_n_object_lookups;
64190-atomic_t fscache_n_object_lookups_negative;
64191-atomic_t fscache_n_object_lookups_positive;
64192-atomic_t fscache_n_object_lookups_timed_out;
64193-atomic_t fscache_n_object_created;
64194-atomic_t fscache_n_object_avail;
64195-atomic_t fscache_n_object_dead;
64196+atomic_unchecked_t fscache_n_object_alloc;
64197+atomic_unchecked_t fscache_n_object_no_alloc;
64198+atomic_unchecked_t fscache_n_object_lookups;
64199+atomic_unchecked_t fscache_n_object_lookups_negative;
64200+atomic_unchecked_t fscache_n_object_lookups_positive;
64201+atomic_unchecked_t fscache_n_object_lookups_timed_out;
64202+atomic_unchecked_t fscache_n_object_created;
64203+atomic_unchecked_t fscache_n_object_avail;
64204+atomic_unchecked_t fscache_n_object_dead;
64205
64206-atomic_t fscache_n_checkaux_none;
64207-atomic_t fscache_n_checkaux_okay;
64208-atomic_t fscache_n_checkaux_update;
64209-atomic_t fscache_n_checkaux_obsolete;
64210+atomic_unchecked_t fscache_n_checkaux_none;
64211+atomic_unchecked_t fscache_n_checkaux_okay;
64212+atomic_unchecked_t fscache_n_checkaux_update;
64213+atomic_unchecked_t fscache_n_checkaux_obsolete;
64214
64215 atomic_t fscache_n_cop_alloc_object;
64216 atomic_t fscache_n_cop_lookup_object;
64217@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
64218 seq_puts(m, "FS-Cache statistics\n");
64219
64220 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
64221- atomic_read(&fscache_n_cookie_index),
64222- atomic_read(&fscache_n_cookie_data),
64223- atomic_read(&fscache_n_cookie_special));
64224+ atomic_read_unchecked(&fscache_n_cookie_index),
64225+ atomic_read_unchecked(&fscache_n_cookie_data),
64226+ atomic_read_unchecked(&fscache_n_cookie_special));
64227
64228 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
64229- atomic_read(&fscache_n_object_alloc),
64230- atomic_read(&fscache_n_object_no_alloc),
64231- atomic_read(&fscache_n_object_avail),
64232- atomic_read(&fscache_n_object_dead));
64233+ atomic_read_unchecked(&fscache_n_object_alloc),
64234+ atomic_read_unchecked(&fscache_n_object_no_alloc),
64235+ atomic_read_unchecked(&fscache_n_object_avail),
64236+ atomic_read_unchecked(&fscache_n_object_dead));
64237 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
64238- atomic_read(&fscache_n_checkaux_none),
64239- atomic_read(&fscache_n_checkaux_okay),
64240- atomic_read(&fscache_n_checkaux_update),
64241- atomic_read(&fscache_n_checkaux_obsolete));
64242+ atomic_read_unchecked(&fscache_n_checkaux_none),
64243+ atomic_read_unchecked(&fscache_n_checkaux_okay),
64244+ atomic_read_unchecked(&fscache_n_checkaux_update),
64245+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
64246
64247 seq_printf(m, "Pages : mrk=%u unc=%u\n",
64248- atomic_read(&fscache_n_marks),
64249- atomic_read(&fscache_n_uncaches));
64250+ atomic_read_unchecked(&fscache_n_marks),
64251+ atomic_read_unchecked(&fscache_n_uncaches));
64252
64253 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
64254 " oom=%u\n",
64255- atomic_read(&fscache_n_acquires),
64256- atomic_read(&fscache_n_acquires_null),
64257- atomic_read(&fscache_n_acquires_no_cache),
64258- atomic_read(&fscache_n_acquires_ok),
64259- atomic_read(&fscache_n_acquires_nobufs),
64260- atomic_read(&fscache_n_acquires_oom));
64261+ atomic_read_unchecked(&fscache_n_acquires),
64262+ atomic_read_unchecked(&fscache_n_acquires_null),
64263+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
64264+ atomic_read_unchecked(&fscache_n_acquires_ok),
64265+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
64266+ atomic_read_unchecked(&fscache_n_acquires_oom));
64267
64268 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
64269- atomic_read(&fscache_n_object_lookups),
64270- atomic_read(&fscache_n_object_lookups_negative),
64271- atomic_read(&fscache_n_object_lookups_positive),
64272- atomic_read(&fscache_n_object_created),
64273- atomic_read(&fscache_n_object_lookups_timed_out));
64274+ atomic_read_unchecked(&fscache_n_object_lookups),
64275+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
64276+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
64277+ atomic_read_unchecked(&fscache_n_object_created),
64278+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
64279
64280 seq_printf(m, "Invals : n=%u run=%u\n",
64281- atomic_read(&fscache_n_invalidates),
64282- atomic_read(&fscache_n_invalidates_run));
64283+ atomic_read_unchecked(&fscache_n_invalidates),
64284+ atomic_read_unchecked(&fscache_n_invalidates_run));
64285
64286 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
64287- atomic_read(&fscache_n_updates),
64288- atomic_read(&fscache_n_updates_null),
64289- atomic_read(&fscache_n_updates_run));
64290+ atomic_read_unchecked(&fscache_n_updates),
64291+ atomic_read_unchecked(&fscache_n_updates_null),
64292+ atomic_read_unchecked(&fscache_n_updates_run));
64293
64294 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
64295- atomic_read(&fscache_n_relinquishes),
64296- atomic_read(&fscache_n_relinquishes_null),
64297- atomic_read(&fscache_n_relinquishes_waitcrt),
64298- atomic_read(&fscache_n_relinquishes_retire));
64299+ atomic_read_unchecked(&fscache_n_relinquishes),
64300+ atomic_read_unchecked(&fscache_n_relinquishes_null),
64301+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
64302+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
64303
64304 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
64305- atomic_read(&fscache_n_attr_changed),
64306- atomic_read(&fscache_n_attr_changed_ok),
64307- atomic_read(&fscache_n_attr_changed_nobufs),
64308- atomic_read(&fscache_n_attr_changed_nomem),
64309- atomic_read(&fscache_n_attr_changed_calls));
64310+ atomic_read_unchecked(&fscache_n_attr_changed),
64311+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
64312+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
64313+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
64314+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
64315
64316 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
64317- atomic_read(&fscache_n_allocs),
64318- atomic_read(&fscache_n_allocs_ok),
64319- atomic_read(&fscache_n_allocs_wait),
64320- atomic_read(&fscache_n_allocs_nobufs),
64321- atomic_read(&fscache_n_allocs_intr));
64322+ atomic_read_unchecked(&fscache_n_allocs),
64323+ atomic_read_unchecked(&fscache_n_allocs_ok),
64324+ atomic_read_unchecked(&fscache_n_allocs_wait),
64325+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
64326+ atomic_read_unchecked(&fscache_n_allocs_intr));
64327 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
64328- atomic_read(&fscache_n_alloc_ops),
64329- atomic_read(&fscache_n_alloc_op_waits),
64330- atomic_read(&fscache_n_allocs_object_dead));
64331+ atomic_read_unchecked(&fscache_n_alloc_ops),
64332+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
64333+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
64334
64335 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
64336 " int=%u oom=%u\n",
64337- atomic_read(&fscache_n_retrievals),
64338- atomic_read(&fscache_n_retrievals_ok),
64339- atomic_read(&fscache_n_retrievals_wait),
64340- atomic_read(&fscache_n_retrievals_nodata),
64341- atomic_read(&fscache_n_retrievals_nobufs),
64342- atomic_read(&fscache_n_retrievals_intr),
64343- atomic_read(&fscache_n_retrievals_nomem));
64344+ atomic_read_unchecked(&fscache_n_retrievals),
64345+ atomic_read_unchecked(&fscache_n_retrievals_ok),
64346+ atomic_read_unchecked(&fscache_n_retrievals_wait),
64347+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
64348+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
64349+ atomic_read_unchecked(&fscache_n_retrievals_intr),
64350+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
64351 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
64352- atomic_read(&fscache_n_retrieval_ops),
64353- atomic_read(&fscache_n_retrieval_op_waits),
64354- atomic_read(&fscache_n_retrievals_object_dead));
64355+ atomic_read_unchecked(&fscache_n_retrieval_ops),
64356+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
64357+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
64358
64359 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
64360- atomic_read(&fscache_n_stores),
64361- atomic_read(&fscache_n_stores_ok),
64362- atomic_read(&fscache_n_stores_again),
64363- atomic_read(&fscache_n_stores_nobufs),
64364- atomic_read(&fscache_n_stores_oom));
64365+ atomic_read_unchecked(&fscache_n_stores),
64366+ atomic_read_unchecked(&fscache_n_stores_ok),
64367+ atomic_read_unchecked(&fscache_n_stores_again),
64368+ atomic_read_unchecked(&fscache_n_stores_nobufs),
64369+ atomic_read_unchecked(&fscache_n_stores_oom));
64370 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
64371- atomic_read(&fscache_n_store_ops),
64372- atomic_read(&fscache_n_store_calls),
64373- atomic_read(&fscache_n_store_pages),
64374- atomic_read(&fscache_n_store_radix_deletes),
64375- atomic_read(&fscache_n_store_pages_over_limit));
64376+ atomic_read_unchecked(&fscache_n_store_ops),
64377+ atomic_read_unchecked(&fscache_n_store_calls),
64378+ atomic_read_unchecked(&fscache_n_store_pages),
64379+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
64380+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
64381
64382 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
64383- atomic_read(&fscache_n_store_vmscan_not_storing),
64384- atomic_read(&fscache_n_store_vmscan_gone),
64385- atomic_read(&fscache_n_store_vmscan_busy),
64386- atomic_read(&fscache_n_store_vmscan_cancelled),
64387- atomic_read(&fscache_n_store_vmscan_wait));
64388+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
64389+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
64390+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
64391+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
64392+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
64393
64394 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
64395- atomic_read(&fscache_n_op_pend),
64396- atomic_read(&fscache_n_op_run),
64397- atomic_read(&fscache_n_op_enqueue),
64398- atomic_read(&fscache_n_op_cancelled),
64399- atomic_read(&fscache_n_op_rejected));
64400+ atomic_read_unchecked(&fscache_n_op_pend),
64401+ atomic_read_unchecked(&fscache_n_op_run),
64402+ atomic_read_unchecked(&fscache_n_op_enqueue),
64403+ atomic_read_unchecked(&fscache_n_op_cancelled),
64404+ atomic_read_unchecked(&fscache_n_op_rejected));
64405 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
64406- atomic_read(&fscache_n_op_deferred_release),
64407- atomic_read(&fscache_n_op_release),
64408- atomic_read(&fscache_n_op_gc));
64409+ atomic_read_unchecked(&fscache_n_op_deferred_release),
64410+ atomic_read_unchecked(&fscache_n_op_release),
64411+ atomic_read_unchecked(&fscache_n_op_gc));
64412
64413 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
64414 atomic_read(&fscache_n_cop_alloc_object),
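
The stats.c read side mirrors the declarations one-for-one: every counter retyped above is read back with atomic_read_unchecked() for the seq_file dump, which is a racy snapshot by design; only the still-checked fscache_n_cop_* counters keep plain atomic_read(). Assuming the accessor is simply atomic_read() minus the instrumentation, a model:

    #include <stdatomic.h>

    typedef struct { atomic_int counter; } atomic_unchecked_model_t;

    static inline int atomic_read_unchecked_model(atomic_unchecked_model_t *v)
    {
            /* relaxed snapshot; exactness does not matter for a stats dump */
            return atomic_load_explicit(&v->counter, memory_order_relaxed);
    }
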
64415diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
64416index 966ace8..030a03a 100644
64417--- a/fs/fuse/cuse.c
64418+++ b/fs/fuse/cuse.c
64419@@ -611,10 +611,12 @@ static int __init cuse_init(void)
64420 INIT_LIST_HEAD(&cuse_conntbl[i]);
64421
64422 /* inherit and extend fuse_dev_operations */
64423- cuse_channel_fops = fuse_dev_operations;
64424- cuse_channel_fops.owner = THIS_MODULE;
64425- cuse_channel_fops.open = cuse_channel_open;
64426- cuse_channel_fops.release = cuse_channel_release;
64427+ pax_open_kernel();
64428+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
64429+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
64430+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
64431+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
64432+ pax_close_kernel();
64433
64434 cuse_class = class_create(THIS_MODULE, "cuse");
64435 if (IS_ERR(cuse_class))
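
The cuse_init() hunk is a different hardening pattern: with structure constification, cuse_channel_fops lives in read-only memory, so the one-time setup has to bracket its writes in pax_open_kernel()/pax_close_kernel() and store through forced pointer casts rather than ordinary member assignment. A userspace sketch of the idea using mprotect() (the kernel primitives toggle the CPU's write-protect enforcement instead; everything suffixed _model is invented here, and a 4 KiB page size is assumed):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct fops_model {
            void *owner;
            int (*open)(void);
    };

    static int cuse_channel_open_model(void) { return 42; }

    /* page-aligned so the whole object can be (un)protected */
    static struct fops_model cuse_fops_model __attribute__((aligned(4096)));

    int main(void)
    {
            long pg = sysconf(_SC_PAGESIZE);

            mprotect(&cuse_fops_model, pg, PROT_READ);              /* "constified" */

            mprotect(&cuse_fops_model, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
            *(void **)&cuse_fops_model.open = (void *)cuse_channel_open_model;
            mprotect(&cuse_fops_model, pg, PROT_READ);              /* pax_close_kernel() */

            printf("open() = %d\n", cuse_fops_model.open());
            return 0;
    }
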
64436diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
64437index ca88731..8e9c55d 100644
64438--- a/fs/fuse/dev.c
64439+++ b/fs/fuse/dev.c
64440@@ -1318,7 +1318,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64441 ret = 0;
64442 pipe_lock(pipe);
64443
64444- if (!pipe->readers) {
64445+ if (!atomic_read(&pipe->readers)) {
64446 send_sig(SIGPIPE, current, 0);
64447 if (!ret)
64448 ret = -EPIPE;
64449@@ -1347,7 +1347,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64450 page_nr++;
64451 ret += buf->len;
64452
64453- if (pipe->files)
64454+ if (atomic_read(&pipe->files))
64455 do_wakeup = 1;
64456 }
64457
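
pipe->readers and pipe->files are bumped and dropped as pipe ends come and go, i.e. they are reference counts in all but name; this patch (presumably in its include/linux/pipe_fs_i.h hunks, which are outside this excerpt) retypes them as atomic_t, so call sites like fuse_dev_splice_read() must read them with atomic_read(). A model of the retyped fields:

    #include <stdatomic.h>

    struct pipe_model {
            atomic_int readers; /* was: unsigned int readers */
            atomic_int files;   /* was: unsigned int files   */
    };

    static int pipe_has_readers_model(struct pipe_model *p)
    {
            return atomic_load(&p->readers) != 0; /* models atomic_read() */
    }
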
64458diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
64459index 0c60482..025724f 100644
64460--- a/fs/fuse/dir.c
64461+++ b/fs/fuse/dir.c
64462@@ -1485,7 +1485,7 @@ static char *read_link(struct dentry *dentry)
64463 return link;
64464 }
64465
64466-static void free_link(char *link)
64467+static void free_link(const char *link)
64468 {
64469 if (!IS_ERR(link))
64470 free_page((unsigned long) link);
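
free_link() is constified because the link text flows through the kernel's ERR_PTR convention, in which the top MAX_ERRNO addresses encode errnos; the callee only inspects the pointer before freeing the page. The hostfs hunk below applies the same const treatment to the nd_get_link() result. A self-contained model of the test (constants as in include/linux/err.h, quoted from memory):

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR_model(long error)
    {
            return (void *)error;
    }

    static inline int IS_ERR_model(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
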
64471diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
64472index bb529f3..454c253 100644
64473--- a/fs/hostfs/hostfs_kern.c
64474+++ b/fs/hostfs/hostfs_kern.c
64475@@ -898,7 +898,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64476
64477 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
64478 {
64479- char *s = nd_get_link(nd);
64480+ const char *s = nd_get_link(nd);
64481 if (!IS_ERR(s))
64482 __putname(s);
64483 }
64484diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
64485index 1e2872b..7aea000 100644
64486--- a/fs/hugetlbfs/inode.c
64487+++ b/fs/hugetlbfs/inode.c
64488@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64489 struct mm_struct *mm = current->mm;
64490 struct vm_area_struct *vma;
64491 struct hstate *h = hstate_file(file);
64492+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
64493 struct vm_unmapped_area_info info;
64494
64495 if (len & ~huge_page_mask(h))
64496@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64497 return addr;
64498 }
64499
64500+#ifdef CONFIG_PAX_RANDMMAP
64501+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64502+#endif
64503+
64504 if (addr) {
64505 addr = ALIGN(addr, huge_page_size(h));
64506 vma = find_vma(mm, addr);
64507- if (TASK_SIZE - len >= addr &&
64508- (!vma || addr + len <= vma->vm_start))
64509+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
64510 return addr;
64511 }
64512
64513 info.flags = 0;
64514 info.length = len;
64515 info.low_limit = TASK_UNMAPPED_BASE;
64516+
64517+#ifdef CONFIG_PAX_RANDMMAP
64518+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64519+ info.low_limit += mm->delta_mmap;
64520+#endif
64521+
64522 info.high_limit = TASK_SIZE;
64523 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
64524 info.align_offset = 0;
64525@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
64526 };
64527 MODULE_ALIAS_FS("hugetlbfs");
64528
64529-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64530+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64531
64532 static int can_do_hugetlb_shm(void)
64533 {
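The hugetlbfs hunk above gives huge-page mappings the same ASLR treatment as ordinary mmap: a per-mapping random offset from gr_rand_threadstack_offset(), a gap check replacing the plain overlap test on the caller's address hint, and an mmap-base shift by mm->delta_mmap when PAX_RANDMMAP is active. A simplified sketch of the shape of that predicate, assuming check_heap_stack_gap() folds the random offset into the distance required below the next VMA (the real helper also honours the configured stack guard gap; the name hint_fits is illustrative):

	#include <linux/mm.h>

	static bool hint_fits(const struct vm_area_struct *vma, unsigned long addr,
			      unsigned long len, unsigned long rand_offset)
	{
		if (!vma)
			return true;	/* nothing mapped above the hint */
		return addr + len + rand_offset <= vma->vm_start;
	}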
64534diff --git a/fs/inode.c b/fs/inode.c
64535index 6eecb7f..abec305 100644
64536--- a/fs/inode.c
64537+++ b/fs/inode.c
64538@@ -839,16 +839,20 @@ unsigned int get_next_ino(void)
64539 unsigned int *p = &get_cpu_var(last_ino);
64540 unsigned int res = *p;
64541
64542+start:
64543+
64544 #ifdef CONFIG_SMP
64545 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
64546- static atomic_t shared_last_ino;
64547- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
64548+ static atomic_unchecked_t shared_last_ino;
64549+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
64550
64551 res = next - LAST_INO_BATCH;
64552 }
64553 #endif
64554
64555- *p = ++res;
64556+ if (unlikely(!++res))
64557+ goto start; /* never zero */
64558+ *p = res;
64559 put_cpu_var(last_ino);
64560 return res;
64561 }
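Two independent changes in get_next_ino() above: the shared batch counter becomes atomic_unchecked_t, the opt-out type for PaX's REFCOUNT overflow trapping, appropriate here because the counter is expected to wrap harmlessly; and the increment now refuses to hand out zero, since a zero i_ino is widely treated as "no inode". The wrap idiom reduced to its core (simplified: the patched function instead jumps back to refill the per-CPU batch on wrap; bump_ino is an illustrative name):

	#include <linux/compiler.h>

	static unsigned int bump_ino(unsigned int res)
	{
		if (unlikely(!++res))
			++res;		/* 32-bit wrap: skip 0 */
		return res;
	}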
64562diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
64563index 4556ce1..5ddaf86 100644
64564--- a/fs/isofs/inode.c
64565+++ b/fs/isofs/inode.c
64566@@ -61,7 +61,7 @@ static void isofs_put_super(struct super_block *sb)
64567 return;
64568 }
64569
64570-static int isofs_read_inode(struct inode *);
64571+static int isofs_read_inode(struct inode *, int relocated);
64572 static int isofs_statfs (struct dentry *, struct kstatfs *);
64573
64574 static struct kmem_cache *isofs_inode_cachep;
64575@@ -1259,7 +1259,7 @@ out_toomany:
64576 goto out;
64577 }
64578
64579-static int isofs_read_inode(struct inode *inode)
64580+static int isofs_read_inode(struct inode *inode, int relocated)
64581 {
64582 struct super_block *sb = inode->i_sb;
64583 struct isofs_sb_info *sbi = ISOFS_SB(sb);
64584@@ -1404,7 +1404,7 @@ static int isofs_read_inode(struct inode *inode)
64585 */
64586
64587 if (!high_sierra) {
64588- parse_rock_ridge_inode(de, inode);
64589+ parse_rock_ridge_inode(de, inode, relocated);
64590 /* if we want uid/gid set, override the rock ridge setting */
64591 if (sbi->s_uid_set)
64592 inode->i_uid = sbi->s_uid;
64593@@ -1483,9 +1483,10 @@ static int isofs_iget5_set(struct inode *ino, void *data)
64594 * offset that point to the underlying meta-data for the inode. The
64595 * code below is otherwise similar to the iget() code in
64596 * include/linux/fs.h */
64597-struct inode *isofs_iget(struct super_block *sb,
64598- unsigned long block,
64599- unsigned long offset)
64600+struct inode *__isofs_iget(struct super_block *sb,
64601+ unsigned long block,
64602+ unsigned long offset,
64603+ int relocated)
64604 {
64605 unsigned long hashval;
64606 struct inode *inode;
64607@@ -1507,7 +1508,7 @@ struct inode *isofs_iget(struct super_block *sb,
64608 return ERR_PTR(-ENOMEM);
64609
64610 if (inode->i_state & I_NEW) {
64611- ret = isofs_read_inode(inode);
64612+ ret = isofs_read_inode(inode, relocated);
64613 if (ret < 0) {
64614 iget_failed(inode);
64615 inode = ERR_PTR(ret);
64616diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
64617index 9916723..0ac4c1f 100644
64618--- a/fs/isofs/isofs.h
64619+++ b/fs/isofs/isofs.h
64620@@ -107,7 +107,7 @@ extern int iso_date(char *, int);
64621
64622 struct inode; /* To make gcc happy */
64623
64624-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
64625+extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
64626 extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
64627 extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
64628
64629@@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int
64630 extern struct buffer_head *isofs_bread(struct inode *, sector_t);
64631 extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
64632
64633-extern struct inode *isofs_iget(struct super_block *sb,
64634- unsigned long block,
64635- unsigned long offset);
64636+struct inode *__isofs_iget(struct super_block *sb,
64637+ unsigned long block,
64638+ unsigned long offset,
64639+ int relocated);
64640+
64641+static inline struct inode *isofs_iget(struct super_block *sb,
64642+ unsigned long block,
64643+ unsigned long offset)
64644+{
64645+ return __isofs_iget(sb, block, offset, 0);
64646+}
64647+
64648+static inline struct inode *isofs_iget_reloc(struct super_block *sb,
64649+ unsigned long block,
64650+ unsigned long offset)
64651+{
64652+ return __isofs_iget(sb, block, offset, 1);
64653+}
64654
64655 /* Because the inode number is no longer relevant to finding the
64656 * underlying meta-data for an inode, we are free to choose a more
64657diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
64658index c0bf424..f488bba 100644
64659--- a/fs/isofs/rock.c
64660+++ b/fs/isofs/rock.c
64661@@ -288,12 +288,16 @@ eio:
64662 goto out;
64663 }
64664
64665+#define RR_REGARD_XA 1
64666+#define RR_RELOC_DE 2
64667+
64668 static int
64669 parse_rock_ridge_inode_internal(struct iso_directory_record *de,
64670- struct inode *inode, int regard_xa)
64671+ struct inode *inode, int flags)
64672 {
64673 int symlink_len = 0;
64674 int cnt, sig;
64675+ unsigned int reloc_block;
64676 struct inode *reloc;
64677 struct rock_ridge *rr;
64678 int rootflag;
64679@@ -305,7 +309,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
64680
64681 init_rock_state(&rs, inode);
64682 setup_rock_ridge(de, inode, &rs);
64683- if (regard_xa) {
64684+ if (flags & RR_REGARD_XA) {
64685 rs.chr += 14;
64686 rs.len -= 14;
64687 if (rs.len < 0)
64688@@ -485,12 +489,22 @@ repeat:
64689 "relocated directory\n");
64690 goto out;
64691 case SIG('C', 'L'):
64692- ISOFS_I(inode)->i_first_extent =
64693- isonum_733(rr->u.CL.location);
64694- reloc =
64695- isofs_iget(inode->i_sb,
64696- ISOFS_I(inode)->i_first_extent,
64697- 0);
64698+ if (flags & RR_RELOC_DE) {
64699+ printk(KERN_ERR
64700+ "ISOFS: Recursive directory relocation "
64701+ "is not supported\n");
64702+ goto eio;
64703+ }
64704+ reloc_block = isonum_733(rr->u.CL.location);
64705+ if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
64706+ ISOFS_I(inode)->i_iget5_offset == 0) {
64707+ printk(KERN_ERR
64708+ "ISOFS: Directory relocation points to "
64709+ "itself\n");
64710+ goto eio;
64711+ }
64712+ ISOFS_I(inode)->i_first_extent = reloc_block;
64713+ reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
64714 if (IS_ERR(reloc)) {
64715 ret = PTR_ERR(reloc);
64716 goto out;
64717@@ -637,9 +651,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
64718 return rpnt;
64719 }
64720
64721-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
64722+int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
64723+ int relocated)
64724 {
64725- int result = parse_rock_ridge_inode_internal(de, inode, 0);
64726+ int flags = relocated ? RR_RELOC_DE : 0;
64727+ int result = parse_rock_ridge_inode_internal(de, inode, flags);
64728
64729 /*
64730 * if rockridge flag was reset and we didn't look for attributes
64731@@ -647,7 +663,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
64732 */
64733 if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
64734 && (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
64735- result = parse_rock_ridge_inode_internal(de, inode, 14);
64736+ result = parse_rock_ridge_inode_internal(de, inode,
64737+ flags | RR_REGARD_XA);
64738 }
64739 return result;
64740 }
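The rock.c hunk above carries the hardening against hostile ISO images that was merged upstream shortly after 3.16.1 (the Rock Ridge CL-entry recursion issues, reportedly tracked as CVE-2014-5471/CVE-2014-5472): parse_rock_ridge_inode_internal() now takes a flags word, RR_RELOC_DE marks an inode that was itself reached through a relocation, a second CL indirection is rejected instead of recursed into, and a CL entry pointing back at its own block is refused. The guard, abstracted (follow_reloc and its error choice are illustrative; the patch uses goto eio):

	/* Depth-one indirection guard: refuse to follow a relocation while
	 * already resolving one, and refuse self-referencing targets.
	 */
	static int follow_reloc(unsigned int target, unsigned int self, int flags)
	{
		if (flags & RR_RELOC_DE)
			return -EIO;	/* recursive relocation: reject */
		if (target == self)
			return -EIO;	/* relocation points to itself */
		return 0;		/* caller may iget the target once */
	}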
64741diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
64742index 4a6cf28..d3a29d3 100644
64743--- a/fs/jffs2/erase.c
64744+++ b/fs/jffs2/erase.c
64745@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
64746 struct jffs2_unknown_node marker = {
64747 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
64748 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64749- .totlen = cpu_to_je32(c->cleanmarker_size)
64750+ .totlen = cpu_to_je32(c->cleanmarker_size),
64751+ .hdr_crc = cpu_to_je32(0)
64752 };
64753
64754 jffs2_prealloc_raw_node_refs(c, jeb, 1);
64755diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
64756index a6597d6..41b30ec 100644
64757--- a/fs/jffs2/wbuf.c
64758+++ b/fs/jffs2/wbuf.c
64759@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
64760 {
64761 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
64762 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64763- .totlen = constant_cpu_to_je32(8)
64764+ .totlen = constant_cpu_to_je32(8),
64765+ .hdr_crc = constant_cpu_to_je32(0)
64766 };
64767
64768 /*
64769diff --git a/fs/jfs/super.c b/fs/jfs/super.c
64770index adf8cb0..bb935fa 100644
64771--- a/fs/jfs/super.c
64772+++ b/fs/jfs/super.c
64773@@ -893,7 +893,7 @@ static int __init init_jfs_fs(void)
64774
64775 jfs_inode_cachep =
64776 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
64777- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
64778+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
64779 init_once);
64780 if (jfs_inode_cachep == NULL)
64781 return -ENOMEM;
64782diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
64783index a693f5b..82276a1 100644
64784--- a/fs/kernfs/dir.c
64785+++ b/fs/kernfs/dir.c
64786@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
64787 *
64788 * Returns 31 bit hash of ns + name (so it fits in an off_t )
64789 */
64790-static unsigned int kernfs_name_hash(const char *name, const void *ns)
64791+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
64792 {
64793 unsigned long hash = init_name_hash();
64794 unsigned int len = strlen(name);
64795diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
64796index d895b4b..0b8af77 100644
64797--- a/fs/kernfs/file.c
64798+++ b/fs/kernfs/file.c
64799@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
64800
64801 struct kernfs_open_node {
64802 atomic_t refcnt;
64803- atomic_t event;
64804+ atomic_unchecked_t event;
64805 wait_queue_head_t poll;
64806 struct list_head files; /* goes through kernfs_open_file.list */
64807 };
64808@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
64809 {
64810 struct kernfs_open_file *of = sf->private;
64811
64812- of->event = atomic_read(&of->kn->attr.open->event);
64813+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
64814
64815 return of->kn->attr.ops->seq_show(sf, v);
64816 }
64817@@ -375,12 +375,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
64818 return ret;
64819 }
64820
64821-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64822- void *buf, int len, int write)
64823+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64824+ void *buf, size_t len, int write)
64825 {
64826 struct file *file = vma->vm_file;
64827 struct kernfs_open_file *of = kernfs_of(file);
64828- int ret;
64829+ ssize_t ret;
64830
64831 if (!of->vm_ops)
64832 return -EINVAL;
64833@@ -581,7 +581,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
64834 return -ENOMEM;
64835
64836 atomic_set(&new_on->refcnt, 0);
64837- atomic_set(&new_on->event, 1);
64838+ atomic_set_unchecked(&new_on->event, 1);
64839 init_waitqueue_head(&new_on->poll);
64840 INIT_LIST_HEAD(&new_on->files);
64841 goto retry;
64842@@ -787,7 +787,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
64843
64844 kernfs_put_active(kn);
64845
64846- if (of->event != atomic_read(&on->event))
64847+ if (of->event != atomic_read_unchecked(&on->event))
64848 goto trigger;
64849
64850 return DEFAULT_POLLMASK;
64851@@ -818,7 +818,7 @@ repeat:
64852
64853 on = kn->attr.open;
64854 if (on) {
64855- atomic_inc(&on->event);
64856+ atomic_inc_unchecked(&on->event);
64857 wake_up_interruptible(&on->poll);
64858 }
64859
64860diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
64861index 8a19889..4c3069a 100644
64862--- a/fs/kernfs/symlink.c
64863+++ b/fs/kernfs/symlink.c
64864@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
64865 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
64866 void *cookie)
64867 {
64868- char *page = nd_get_link(nd);
64869+ const char *page = nd_get_link(nd);
64870 if (!IS_ERR(page))
64871 free_page((unsigned long)page);
64872 }
64873diff --git a/fs/libfs.c b/fs/libfs.c
64874index 88e3e00..979c262 100644
64875--- a/fs/libfs.c
64876+++ b/fs/libfs.c
64877@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64878
64879 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
64880 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
64881+ char d_name[sizeof(next->d_iname)];
64882+ const unsigned char *name;
64883+
64884 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
64885 if (!simple_positive(next)) {
64886 spin_unlock(&next->d_lock);
64887@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64888
64889 spin_unlock(&next->d_lock);
64890 spin_unlock(&dentry->d_lock);
64891- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
64892+ name = next->d_name.name;
64893+ if (name == next->d_iname) {
64894+ memcpy(d_name, name, next->d_name.len);
64895+ name = d_name;
64896+ }
64897+ if (!dir_emit(ctx, name, next->d_name.len,
64898 next->d_inode->i_ino, dt_type(next->d_inode)))
64899 return 0;
64900 spin_lock(&dentry->d_lock);
64901@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
64902 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
64903 void *cookie)
64904 {
64905- char *s = nd_get_link(nd);
64906+ const char *s = nd_get_link(nd);
64907 if (!IS_ERR(s))
64908 kfree(s);
64909 }
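The dcache_readdir() change above stabilizes short dentry names before emitting them: dir_emit() may fault and sleep in copy_to_user(), during which a concurrent rename can rewrite a name stored inline in the dentry (d_iname) in place, so names that live inside the dentry are copied to a stack buffer first. The pattern in isolation, as it appears in the hunk:

	/* Stabilize an inline dentry name before a sleeping consumer sees it. */
	char stable[sizeof(next->d_iname)];
	const unsigned char *name = next->d_name.name;

	if (name == next->d_iname) {	/* name stored inside the dentry itself */
		memcpy(stable, name, next->d_name.len);
		name = (const unsigned char *)stable;
	}
	/* ... name is now safe to hand to dir_emit() ... */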
64910diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
64911index acd3947..1f896e2 100644
64912--- a/fs/lockd/clntproc.c
64913+++ b/fs/lockd/clntproc.c
64914@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
64915 /*
64916 * Cookie counter for NLM requests
64917 */
64918-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
64919+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
64920
64921 void nlmclnt_next_cookie(struct nlm_cookie *c)
64922 {
64923- u32 cookie = atomic_inc_return(&nlm_cookie);
64924+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
64925
64926 memcpy(c->data, &cookie, 4);
64927 c->len=4;
64928diff --git a/fs/locks.c b/fs/locks.c
64929index 717fbc4..74628c3 100644
64930--- a/fs/locks.c
64931+++ b/fs/locks.c
64932@@ -2327,7 +2327,7 @@ void locks_remove_file(struct file *filp)
64933 locks_remove_posix(filp, (fl_owner_t)filp);
64934
64935 if (filp->f_op->flock) {
64936- struct file_lock fl = {
64937+ struct file_lock flock = {
64938 .fl_owner = (fl_owner_t)filp,
64939 .fl_pid = current->tgid,
64940 .fl_file = filp,
64941@@ -2335,9 +2335,9 @@ void locks_remove_file(struct file *filp)
64942 .fl_type = F_UNLCK,
64943 .fl_end = OFFSET_MAX,
64944 };
64945- filp->f_op->flock(filp, F_SETLKW, &fl);
64946- if (fl.fl_ops && fl.fl_ops->fl_release_private)
64947- fl.fl_ops->fl_release_private(&fl);
64948+ filp->f_op->flock(filp, F_SETLKW, &flock);
64949+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
64950+ flock.fl_ops->fl_release_private(&flock);
64951 }
64952
64953 spin_lock(&inode->i_lock);
64954diff --git a/fs/mount.h b/fs/mount.h
64955index d55297f..f5b28c5 100644
64956--- a/fs/mount.h
64957+++ b/fs/mount.h
64958@@ -11,7 +11,7 @@ struct mnt_namespace {
64959 u64 seq; /* Sequence number to prevent loops */
64960 wait_queue_head_t poll;
64961 u64 event;
64962-};
64963+} __randomize_layout;
64964
64965 struct mnt_pcp {
64966 int mnt_count;
64967@@ -57,7 +57,7 @@ struct mount {
64968 int mnt_expiry_mark; /* true if marked for expiry */
64969 int mnt_pinned;
64970 struct path mnt_ex_mountpoint;
64971-};
64972+} __randomize_layout;
64973
64974 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
64975
64976diff --git a/fs/namei.c b/fs/namei.c
64977index 9eb787e..5f520b67 100644
64978--- a/fs/namei.c
64979+++ b/fs/namei.c
64980@@ -330,17 +330,32 @@ int generic_permission(struct inode *inode, int mask)
64981 if (ret != -EACCES)
64982 return ret;
64983
64984+#ifdef CONFIG_GRKERNSEC
64985+ /* we'll block if we have to log due to a denied capability use */
64986+ if (mask & MAY_NOT_BLOCK)
64987+ return -ECHILD;
64988+#endif
64989+
64990 if (S_ISDIR(inode->i_mode)) {
64991 /* DACs are overridable for directories */
64992- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64993- return 0;
64994 if (!(mask & MAY_WRITE))
64995- if (capable_wrt_inode_uidgid(inode,
64996- CAP_DAC_READ_SEARCH))
64997+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64998+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64999 return 0;
65000+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65001+ return 0;
65002 return -EACCES;
65003 }
65004 /*
65005+ * Searching includes executable on directories, else just read.
65006+ */
65007+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65008+ if (mask == MAY_READ)
65009+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65010+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65011+ return 0;
65012+
65013+ /*
65014 * Read/write DACs are always overridable.
65015 * Executable DACs are overridable when there is
65016 * at least one exec bit set.
65017@@ -349,14 +364,6 @@ int generic_permission(struct inode *inode, int mask)
65018 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65019 return 0;
65020
65021- /*
65022- * Searching includes executable on directories, else just read.
65023- */
65024- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65025- if (mask == MAY_READ)
65026- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65027- return 0;
65028-
65029 return -EACCES;
65030 }
65031 EXPORT_SYMBOL(generic_permission);
65032@@ -824,7 +831,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65033 {
65034 struct dentry *dentry = link->dentry;
65035 int error;
65036- char *s;
65037+ const char *s;
65038
65039 BUG_ON(nd->flags & LOOKUP_RCU);
65040
65041@@ -845,6 +852,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65042 if (error)
65043 goto out_put_nd_path;
65044
65045+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
65046+ dentry->d_inode, dentry, nd->path.mnt)) {
65047+ error = -EACCES;
65048+ goto out_put_nd_path;
65049+ }
65050+
65051 nd->last_type = LAST_BIND;
65052 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
65053 error = PTR_ERR(*p);
65054@@ -1596,6 +1609,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
65055 if (res)
65056 break;
65057 res = walk_component(nd, path, LOOKUP_FOLLOW);
65058+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
65059+ res = -EACCES;
65060 put_link(nd, &link, cookie);
65061 } while (res > 0);
65062
65063@@ -1669,7 +1684,7 @@ EXPORT_SYMBOL(full_name_hash);
65064 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
65065 {
65066 unsigned long a, b, adata, bdata, mask, hash, len;
65067- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65068+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65069
65070 hash = a = 0;
65071 len = -sizeof(unsigned long);
65072@@ -1953,6 +1968,8 @@ static int path_lookupat(int dfd, const char *name,
65073 if (err)
65074 break;
65075 err = lookup_last(nd, &path);
65076+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
65077+ err = -EACCES;
65078 put_link(nd, &link, cookie);
65079 }
65080 }
65081@@ -1960,6 +1977,13 @@ static int path_lookupat(int dfd, const char *name,
65082 if (!err)
65083 err = complete_walk(nd);
65084
65085+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
65086+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65087+ path_put(&nd->path);
65088+ err = -ENOENT;
65089+ }
65090+ }
65091+
65092 if (!err && nd->flags & LOOKUP_DIRECTORY) {
65093 if (!d_can_lookup(nd->path.dentry)) {
65094 path_put(&nd->path);
65095@@ -1987,8 +2011,15 @@ static int filename_lookup(int dfd, struct filename *name,
65096 retval = path_lookupat(dfd, name->name,
65097 flags | LOOKUP_REVAL, nd);
65098
65099- if (likely(!retval))
65100+ if (likely(!retval)) {
65101 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
65102+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
65103+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
65104+ path_put(&nd->path);
65105+ return -ENOENT;
65106+ }
65107+ }
65108+ }
65109 return retval;
65110 }
65111
65112@@ -2570,6 +2601,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
65113 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
65114 return -EPERM;
65115
65116+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
65117+ return -EPERM;
65118+ if (gr_handle_rawio(inode))
65119+ return -EPERM;
65120+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
65121+ return -EACCES;
65122+
65123 return 0;
65124 }
65125
65126@@ -2801,7 +2839,7 @@ looked_up:
65127 * cleared otherwise prior to returning.
65128 */
65129 static int lookup_open(struct nameidata *nd, struct path *path,
65130- struct file *file,
65131+ struct path *link, struct file *file,
65132 const struct open_flags *op,
65133 bool got_write, int *opened)
65134 {
65135@@ -2836,6 +2874,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65136 /* Negative dentry, just create the file */
65137 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
65138 umode_t mode = op->mode;
65139+
65140+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
65141+ error = -EACCES;
65142+ goto out_dput;
65143+ }
65144+
65145+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
65146+ error = -EACCES;
65147+ goto out_dput;
65148+ }
65149+
65150 if (!IS_POSIXACL(dir->d_inode))
65151 mode &= ~current_umask();
65152 /*
65153@@ -2857,6 +2906,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65154 nd->flags & LOOKUP_EXCL);
65155 if (error)
65156 goto out_dput;
65157+ else
65158+ gr_handle_create(dentry, nd->path.mnt);
65159 }
65160 out_no_open:
65161 path->dentry = dentry;
65162@@ -2871,7 +2922,7 @@ out_dput:
65163 /*
65164 * Handle the last step of open()
65165 */
65166-static int do_last(struct nameidata *nd, struct path *path,
65167+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
65168 struct file *file, const struct open_flags *op,
65169 int *opened, struct filename *name)
65170 {
65171@@ -2921,6 +2972,15 @@ static int do_last(struct nameidata *nd, struct path *path,
65172 if (error)
65173 return error;
65174
65175+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
65176+ error = -ENOENT;
65177+ goto out;
65178+ }
65179+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65180+ error = -EACCES;
65181+ goto out;
65182+ }
65183+
65184 audit_inode(name, dir, LOOKUP_PARENT);
65185 error = -EISDIR;
65186 /* trailing slashes? */
65187@@ -2940,7 +3000,7 @@ retry_lookup:
65188 */
65189 }
65190 mutex_lock(&dir->d_inode->i_mutex);
65191- error = lookup_open(nd, path, file, op, got_write, opened);
65192+ error = lookup_open(nd, path, link, file, op, got_write, opened);
65193 mutex_unlock(&dir->d_inode->i_mutex);
65194
65195 if (error <= 0) {
65196@@ -2964,11 +3024,28 @@ retry_lookup:
65197 goto finish_open_created;
65198 }
65199
65200+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
65201+ error = -ENOENT;
65202+ goto exit_dput;
65203+ }
65204+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
65205+ error = -EACCES;
65206+ goto exit_dput;
65207+ }
65208+
65209 /*
65210 * create/update audit record if it already exists.
65211 */
65212- if (d_is_positive(path->dentry))
65213+ if (d_is_positive(path->dentry)) {
65214+ /* only check if O_CREAT is specified, all other checks need to go
65215+ into may_open */
65216+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
65217+ error = -EACCES;
65218+ goto exit_dput;
65219+ }
65220+
65221 audit_inode(name, path->dentry, 0);
65222+ }
65223
65224 /*
65225 * If atomic_open() acquired write access it is dropped now due to
65226@@ -3009,6 +3086,11 @@ finish_lookup:
65227 }
65228 }
65229 BUG_ON(inode != path->dentry->d_inode);
65230+ /* if we're resolving a symlink to another symlink */
65231+ if (link && gr_handle_symlink_owner(link, inode)) {
65232+ error = -EACCES;
65233+ goto out;
65234+ }
65235 return 1;
65236 }
65237
65238@@ -3018,7 +3100,6 @@ finish_lookup:
65239 save_parent.dentry = nd->path.dentry;
65240 save_parent.mnt = mntget(path->mnt);
65241 nd->path.dentry = path->dentry;
65242-
65243 }
65244 nd->inode = inode;
65245 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
65246@@ -3028,7 +3109,18 @@ finish_open:
65247 path_put(&save_parent);
65248 return error;
65249 }
65250+
65251+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65252+ error = -ENOENT;
65253+ goto out;
65254+ }
65255+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65256+ error = -EACCES;
65257+ goto out;
65258+ }
65259+
65260 audit_inode(name, nd->path.dentry, 0);
65261+
65262 error = -EISDIR;
65263 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
65264 goto out;
65265@@ -3191,7 +3283,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65266 if (unlikely(error))
65267 goto out;
65268
65269- error = do_last(nd, &path, file, op, &opened, pathname);
65270+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
65271 while (unlikely(error > 0)) { /* trailing symlink */
65272 struct path link = path;
65273 void *cookie;
65274@@ -3209,7 +3301,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65275 error = follow_link(&link, nd, &cookie);
65276 if (unlikely(error))
65277 break;
65278- error = do_last(nd, &path, file, op, &opened, pathname);
65279+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
65280 put_link(nd, &link, cookie);
65281 }
65282 out:
65283@@ -3309,9 +3401,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
65284 goto unlock;
65285
65286 error = -EEXIST;
65287- if (d_is_positive(dentry))
65288+ if (d_is_positive(dentry)) {
65289+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
65290+ error = -ENOENT;
65291 goto fail;
65292-
65293+ }
65294 /*
65295 * Special case - lookup gave negative, but... we had foo/bar/
65296 * From the vfs_mknod() POV we just have a negative dentry -
65297@@ -3363,6 +3457,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
65298 }
65299 EXPORT_SYMBOL(user_path_create);
65300
65301+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
65302+{
65303+ struct filename *tmp = getname(pathname);
65304+ struct dentry *res;
65305+ if (IS_ERR(tmp))
65306+ return ERR_CAST(tmp);
65307+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
65308+ if (IS_ERR(res))
65309+ putname(tmp);
65310+ else
65311+ *to = tmp;
65312+ return res;
65313+}
65314+
65315 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
65316 {
65317 int error = may_create(dir, dentry);
65318@@ -3426,6 +3534,17 @@ retry:
65319
65320 if (!IS_POSIXACL(path.dentry->d_inode))
65321 mode &= ~current_umask();
65322+
65323+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
65324+ error = -EPERM;
65325+ goto out;
65326+ }
65327+
65328+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
65329+ error = -EACCES;
65330+ goto out;
65331+ }
65332+
65333 error = security_path_mknod(&path, dentry, mode, dev);
65334 if (error)
65335 goto out;
65336@@ -3442,6 +3561,8 @@ retry:
65337 break;
65338 }
65339 out:
65340+ if (!error)
65341+ gr_handle_create(dentry, path.mnt);
65342 done_path_create(&path, dentry);
65343 if (retry_estale(error, lookup_flags)) {
65344 lookup_flags |= LOOKUP_REVAL;
65345@@ -3495,9 +3616,16 @@ retry:
65346
65347 if (!IS_POSIXACL(path.dentry->d_inode))
65348 mode &= ~current_umask();
65349+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
65350+ error = -EACCES;
65351+ goto out;
65352+ }
65353 error = security_path_mkdir(&path, dentry, mode);
65354 if (!error)
65355 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
65356+ if (!error)
65357+ gr_handle_create(dentry, path.mnt);
65358+out:
65359 done_path_create(&path, dentry);
65360 if (retry_estale(error, lookup_flags)) {
65361 lookup_flags |= LOOKUP_REVAL;
65362@@ -3580,6 +3708,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
65363 struct filename *name;
65364 struct dentry *dentry;
65365 struct nameidata nd;
65366+ ino_t saved_ino = 0;
65367+ dev_t saved_dev = 0;
65368 unsigned int lookup_flags = 0;
65369 retry:
65370 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65371@@ -3612,10 +3742,21 @@ retry:
65372 error = -ENOENT;
65373 goto exit3;
65374 }
65375+
65376+ saved_ino = dentry->d_inode->i_ino;
65377+ saved_dev = gr_get_dev_from_dentry(dentry);
65378+
65379+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
65380+ error = -EACCES;
65381+ goto exit3;
65382+ }
65383+
65384 error = security_path_rmdir(&nd.path, dentry);
65385 if (error)
65386 goto exit3;
65387 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
65388+ if (!error && (saved_dev || saved_ino))
65389+ gr_handle_delete(saved_ino, saved_dev);
65390 exit3:
65391 dput(dentry);
65392 exit2:
65393@@ -3706,6 +3847,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
65394 struct nameidata nd;
65395 struct inode *inode = NULL;
65396 struct inode *delegated_inode = NULL;
65397+ ino_t saved_ino = 0;
65398+ dev_t saved_dev = 0;
65399 unsigned int lookup_flags = 0;
65400 retry:
65401 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65402@@ -3732,10 +3875,22 @@ retry_deleg:
65403 if (d_is_negative(dentry))
65404 goto slashes;
65405 ihold(inode);
65406+
65407+ if (inode->i_nlink <= 1) {
65408+ saved_ino = inode->i_ino;
65409+ saved_dev = gr_get_dev_from_dentry(dentry);
65410+ }
65411+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
65412+ error = -EACCES;
65413+ goto exit2;
65414+ }
65415+
65416 error = security_path_unlink(&nd.path, dentry);
65417 if (error)
65418 goto exit2;
65419 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
65420+ if (!error && (saved_ino || saved_dev))
65421+ gr_handle_delete(saved_ino, saved_dev);
65422 exit2:
65423 dput(dentry);
65424 }
65425@@ -3824,9 +3979,17 @@ retry:
65426 if (IS_ERR(dentry))
65427 goto out_putname;
65428
65429+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
65430+ error = -EACCES;
65431+ goto out;
65432+ }
65433+
65434 error = security_path_symlink(&path, dentry, from->name);
65435 if (!error)
65436 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
65437+ if (!error)
65438+ gr_handle_create(dentry, path.mnt);
65439+out:
65440 done_path_create(&path, dentry);
65441 if (retry_estale(error, lookup_flags)) {
65442 lookup_flags |= LOOKUP_REVAL;
65443@@ -3930,6 +4093,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
65444 struct dentry *new_dentry;
65445 struct path old_path, new_path;
65446 struct inode *delegated_inode = NULL;
65447+ struct filename *to = NULL;
65448 int how = 0;
65449 int error;
65450
65451@@ -3953,7 +4117,7 @@ retry:
65452 if (error)
65453 return error;
65454
65455- new_dentry = user_path_create(newdfd, newname, &new_path,
65456+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
65457 (how & LOOKUP_REVAL));
65458 error = PTR_ERR(new_dentry);
65459 if (IS_ERR(new_dentry))
65460@@ -3965,11 +4129,28 @@ retry:
65461 error = may_linkat(&old_path);
65462 if (unlikely(error))
65463 goto out_dput;
65464+
65465+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
65466+ old_path.dentry->d_inode,
65467+ old_path.dentry->d_inode->i_mode, to)) {
65468+ error = -EACCES;
65469+ goto out_dput;
65470+ }
65471+
65472+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
65473+ old_path.dentry, old_path.mnt, to)) {
65474+ error = -EACCES;
65475+ goto out_dput;
65476+ }
65477+
65478 error = security_path_link(old_path.dentry, &new_path, new_dentry);
65479 if (error)
65480 goto out_dput;
65481 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
65482+ if (!error)
65483+ gr_handle_create(new_dentry, new_path.mnt);
65484 out_dput:
65485+ putname(to);
65486 done_path_create(&new_path, new_dentry);
65487 if (delegated_inode) {
65488 error = break_deleg_wait(&delegated_inode);
65489@@ -4279,6 +4460,12 @@ retry_deleg:
65490 if (new_dentry == trap)
65491 goto exit5;
65492
65493+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
65494+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
65495+ to, flags);
65496+ if (error)
65497+ goto exit5;
65498+
65499 error = security_path_rename(&oldnd.path, old_dentry,
65500 &newnd.path, new_dentry, flags);
65501 if (error)
65502@@ -4286,6 +4473,9 @@ retry_deleg:
65503 error = vfs_rename(old_dir->d_inode, old_dentry,
65504 new_dir->d_inode, new_dentry,
65505 &delegated_inode, flags);
65506+ if (!error)
65507+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
65508+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
65509 exit5:
65510 dput(new_dentry);
65511 exit4:
65512@@ -4328,14 +4518,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
65513
65514 int readlink_copy(char __user *buffer, int buflen, const char *link)
65515 {
65516+ char tmpbuf[64];
65517+ const char *newlink;
65518 int len = PTR_ERR(link);
65519+
65520 if (IS_ERR(link))
65521 goto out;
65522
65523 len = strlen(link);
65524 if (len > (unsigned) buflen)
65525 len = buflen;
65526- if (copy_to_user(buffer, link, len))
65527+
65528+ if (len < sizeof(tmpbuf)) {
65529+ memcpy(tmpbuf, link, len);
65530+ newlink = tmpbuf;
65531+ } else
65532+ newlink = link;
65533+
65534+ if (copy_to_user(buffer, newlink, len))
65535 len = -EFAULT;
65536 out:
65537 return len;
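Summary of the namei.c diff above: the grsecurity policy hooks (gr_acl_handle_*, gr_handle_symlink_owner, gr_handle_fifo, gr_handle_create/delete/rename, and the chroot checks) are threaded through lookup, open, create, mknod, mkdir, symlink, link, unlink, rmdir, and rename; do_last() and lookup_open() grow a struct path *link parameter so symlink-ownership policy can be enforced at each resolution step; and generic_permission() is reordered so CAP_DAC_READ_SEARCH (or the non-logging CAP_DAC_OVERRIDE variant) is tried before a logged CAP_DAC_OVERRIDE use. The final hunk is independent: readlink_copy() bounces short link bodies through a stack buffer before copy_to_user(), presumably so the filesystem-owned source buffer is never read across a potentially faulting, sleeping user copy. The bounce pattern, simplified (copy_str_to_user is an illustrative name):

	#include <linux/string.h>
	#include <linux/uaccess.h>

	static long copy_str_to_user(char __user *buf, int buflen, const char *src)
	{
		char tmp[64];
		size_t len = strlen(src);

		if (len > (unsigned int)buflen)
			len = buflen;
		if (len < sizeof(tmp)) {	/* small enough: go via the stack */
			memcpy(tmp, src, len);
			src = tmp;
		}
		return copy_to_user(buf, src, len) ? -EFAULT : (long)len;
	}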
65538diff --git a/fs/namespace.c b/fs/namespace.c
65539index 182bc41..72e3cf1 100644
65540--- a/fs/namespace.c
65541+++ b/fs/namespace.c
65542@@ -1348,6 +1348,9 @@ static int do_umount(struct mount *mnt, int flags)
65543 if (!(sb->s_flags & MS_RDONLY))
65544 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
65545 up_write(&sb->s_umount);
65546+
65547+ gr_log_remount(mnt->mnt_devname, retval);
65548+
65549 return retval;
65550 }
65551
65552@@ -1370,6 +1373,9 @@ static int do_umount(struct mount *mnt, int flags)
65553 }
65554 unlock_mount_hash();
65555 namespace_unlock();
65556+
65557+ gr_log_unmount(mnt->mnt_devname, retval);
65558+
65559 return retval;
65560 }
65561
65562@@ -1389,7 +1395,7 @@ static inline bool may_mount(void)
65563 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
65564 */
65565
65566-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
65567+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
65568 {
65569 struct path path;
65570 struct mount *mnt;
65571@@ -1431,7 +1437,7 @@ out:
65572 /*
65573 * The 2.0 compatible umount. No flags.
65574 */
65575-SYSCALL_DEFINE1(oldumount, char __user *, name)
65576+SYSCALL_DEFINE1(oldumount, const char __user *, name)
65577 {
65578 return sys_umount(name, 0);
65579 }
65580@@ -2440,6 +2446,16 @@ long do_mount(const char *dev_name, const char *dir_name,
65581 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
65582 MS_STRICTATIME);
65583
65584+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
65585+ retval = -EPERM;
65586+ goto dput_out;
65587+ }
65588+
65589+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
65590+ retval = -EPERM;
65591+ goto dput_out;
65592+ }
65593+
65594 if (flags & MS_REMOUNT)
65595 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
65596 data_page);
65597@@ -2454,6 +2470,9 @@ long do_mount(const char *dev_name, const char *dir_name,
65598 dev_name, data_page);
65599 dput_out:
65600 path_put(&path);
65601+
65602+ gr_log_mount(dev_name, dir_name, retval);
65603+
65604 return retval;
65605 }
65606
65607@@ -2471,7 +2490,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
65608 * number incrementing at 10Ghz will take 12,427 years to wrap which
65609 * is effectively never, so we can ignore the possibility.
65610 */
65611-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
65612+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
65613
65614 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65615 {
65616@@ -2486,7 +2505,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65617 kfree(new_ns);
65618 return ERR_PTR(ret);
65619 }
65620- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
65621+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
65622 atomic_set(&new_ns->count, 1);
65623 new_ns->root = NULL;
65624 INIT_LIST_HEAD(&new_ns->list);
65625@@ -2496,7 +2515,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65626 return new_ns;
65627 }
65628
65629-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65630+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65631 struct user_namespace *user_ns, struct fs_struct *new_fs)
65632 {
65633 struct mnt_namespace *new_ns;
65634@@ -2617,8 +2636,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
65635 }
65636 EXPORT_SYMBOL(mount_subtree);
65637
65638-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
65639- char __user *, type, unsigned long, flags, void __user *, data)
65640+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
65641+ const char __user *, type, unsigned long, flags, void __user *, data)
65642 {
65643 int ret;
65644 char *kernel_type;
65645@@ -2731,6 +2750,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65646 if (error)
65647 goto out2;
65648
65649+ if (gr_handle_chroot_pivot()) {
65650+ error = -EPERM;
65651+ goto out2;
65652+ }
65653+
65654 get_fs_root(current->fs, &root);
65655 old_mp = lock_mount(&old);
65656 error = PTR_ERR(old_mp);
65657@@ -2999,7 +3023,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
65658 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
65659 return -EPERM;
65660
65661- if (fs->users != 1)
65662+ if (atomic_read(&fs->users) != 1)
65663 return -EINVAL;
65664
65665 get_mnt_ns(mnt_ns);
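The namespace.c diff above adds mount/umount/remount audit logging (gr_log_*), const-correct user pointers in the umount and mount syscall definitions, the chroot pivot_root guard, and two annotation changes: mnt_ns_seq becomes atomic64_unchecked_t (the adjacent comment already argues wraparound is unreachable, so overflow trapping buys nothing), and copy_mnt_ns() is tagged __latent_entropy so the latent_entropy GCC plugin mixes per-call values into the kernel's entropy pool. Marking sketch; the fallback define and function name are assumptions for illustration:

	#ifndef __latent_entropy
	#define __latent_entropy	/* no-op without the latent_entropy plugin */
	#endif

	/* Plugin-instrumented function: each call site contributes entropy. */
	static void __latent_entropy seed_on_clone(void) { }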
65666diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
65667index f4ccfe6..a5cf064 100644
65668--- a/fs/nfs/callback_xdr.c
65669+++ b/fs/nfs/callback_xdr.c
65670@@ -51,7 +51,7 @@ struct callback_op {
65671 callback_decode_arg_t decode_args;
65672 callback_encode_res_t encode_res;
65673 long res_maxsize;
65674-};
65675+} __do_const;
65676
65677 static struct callback_op callback_ops[];
65678
65679diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
65680index 9927913..faffc5c 100644
65681--- a/fs/nfs/inode.c
65682+++ b/fs/nfs/inode.c
65683@@ -1219,16 +1219,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
65684 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
65685 }
65686
65687-static atomic_long_t nfs_attr_generation_counter;
65688+static atomic_long_unchecked_t nfs_attr_generation_counter;
65689
65690 static unsigned long nfs_read_attr_generation_counter(void)
65691 {
65692- return atomic_long_read(&nfs_attr_generation_counter);
65693+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
65694 }
65695
65696 unsigned long nfs_inc_attr_generation_counter(void)
65697 {
65698- return atomic_long_inc_return(&nfs_attr_generation_counter);
65699+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
65700 }
65701
65702 void nfs_fattr_init(struct nfs_fattr *fattr)
65703diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
65704index 8f854dd..d0fec26 100644
65705--- a/fs/nfs/nfs3acl.c
65706+++ b/fs/nfs/nfs3acl.c
65707@@ -256,7 +256,7 @@ nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
65708 char *p = data + *result;
65709
65710 acl = get_acl(inode, type);
65711- if (!acl)
65712+ if (IS_ERR_OR_NULL(acl))
65713 return 0;
65714
65715 posix_acl_release(acl);
65716diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
65717index 8f029db..3688b84 100644
65718--- a/fs/nfsd/nfs4proc.c
65719+++ b/fs/nfsd/nfs4proc.c
65720@@ -1157,7 +1157,7 @@ struct nfsd4_operation {
65721 nfsd4op_rsize op_rsize_bop;
65722 stateid_getter op_get_currentstateid;
65723 stateid_setter op_set_currentstateid;
65724-};
65725+} __do_const;
65726
65727 static struct nfsd4_operation nfsd4_ops[];
65728
65729diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
65730index 944275c..6fc40a7 100644
65731--- a/fs/nfsd/nfs4xdr.c
65732+++ b/fs/nfsd/nfs4xdr.c
65733@@ -1539,7 +1539,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
65734
65735 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
65736
65737-static nfsd4_dec nfsd4_dec_ops[] = {
65738+static const nfsd4_dec nfsd4_dec_ops[] = {
65739 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
65740 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
65741 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
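A recurring pattern in the nfs/nfsd hunks above: dispatch tables of function pointers are either declared const outright (nfsd4_dec_ops) or tagged __do_const so the constify plugin moves them into .rodata, so a kernel-memory write primitive cannot retarget the entries. Standalone illustration of the plain-const case, no plugin required (all names illustrative):

	typedef int (*op_fn)(void *arg);

	static int op_read(void *arg)  { (void)arg; return 0; }
	static int op_write(void *arg) { (void)arg; return 1; }

	/* const table: emitted into .rodata, entries cannot be overwritten */
	static const op_fn op_table[] = { op_read, op_write };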
65742diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
65743index 6040da8..4348565 100644
65744--- a/fs/nfsd/nfscache.c
65745+++ b/fs/nfsd/nfscache.c
65746@@ -518,17 +518,20 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65747 {
65748 struct svc_cacherep *rp = rqstp->rq_cacherep;
65749 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
65750- int len;
65751+ long len;
65752 size_t bufsize = 0;
65753
65754 if (!rp)
65755 return;
65756
65757- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
65758- len >>= 2;
65759+ if (statp) {
65760+ len = (char*)statp - (char*)resv->iov_base;
65761+ len = resv->iov_len - len;
65762+ len >>= 2;
65763+ }
65764
65765 /* Don't cache excessive amounts of data and XDR failures */
65766- if (!statp || len > (256 >> 2)) {
65767+ if (!statp || len > (256 >> 2) || len < 0) {
65768 nfsd_reply_cache_free(rp);
65769 return;
65770 }
65771@@ -536,7 +539,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65772 switch (cachetype) {
65773 case RC_REPLSTAT:
65774 if (len != 1)
65775- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
65776+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
65777 rp->c_replstat = *statp;
65778 break;
65779 case RC_REPLBUFF:
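The nfscache.c fix above closes a signedness hole: len was computed even when statp was NULL, and as an int it could go negative whenever statp pointed past the end of the iovec, slipping under the old len > (256 >> 2) cap. Now the length is a long, computed only for a valid statp, and negative values are rejected explicitly (the printk format is updated to %ld to match). Why the widening matters, as a fragment (cached_len is an illustrative name):

	#include <linux/uio.h>

	static long cached_len(const struct kvec *resv, const __be32 *statp)
	{
		long off = (const char *)statp - (const char *)resv->iov_base;

		/* negative when statp lies beyond iov_base + iov_len */
		return ((long)resv->iov_len - off) >> 2;
	}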
65780diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
65781index 140c496..e9cbf14 100644
65782--- a/fs/nfsd/vfs.c
65783+++ b/fs/nfsd/vfs.c
65784@@ -855,7 +855,7 @@ int nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
65785
65786 oldfs = get_fs();
65787 set_fs(KERNEL_DS);
65788- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
65789+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
65790 set_fs(oldfs);
65791 return nfsd_finish_read(file, count, host_err);
65792 }
65793@@ -943,7 +943,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
65794
65795 /* Write the data. */
65796 oldfs = get_fs(); set_fs(KERNEL_DS);
65797- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
65798+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
65799 set_fs(oldfs);
65800 if (host_err < 0)
65801 goto out_nfserr;
65802@@ -1482,7 +1482,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
65803 */
65804
65805 oldfs = get_fs(); set_fs(KERNEL_DS);
65806- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
65807+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
65808 set_fs(oldfs);
65809
65810 if (host_err < 0)
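In the nfsd/vfs.c hunks above, kernel-space buffers handed to vfs_readv()/vfs_writev()/->readlink() inside set_fs(KERNEL_DS) windows are cast with __force_user rather than plain __user: under PaX's stricter address-space checking this documents that the "user" pointer is deliberately a kernel address, valid only while the address limit is widened. The enclosing pattern as a self-contained sketch (kernel_readv is an illustrative name):

	#include <linux/fs.h>
	#include <linux/uaccess.h>
	#include <linux/uio.h>

	static ssize_t kernel_readv(struct file *file, struct kvec *vec,
				    unsigned long vlen, loff_t *pos)
	{
		mm_segment_t oldfs = get_fs();
		ssize_t ret;

		set_fs(KERNEL_DS);	/* widen the access limit */
		ret = vfs_readv(file, (struct iovec __force_user *)vec, vlen, pos);
		set_fs(oldfs);		/* restore it */
		return ret;
	}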
65811diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
65812index 52ccd34..7a6b202 100644
65813--- a/fs/nls/nls_base.c
65814+++ b/fs/nls/nls_base.c
65815@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
65816
65817 int __register_nls(struct nls_table *nls, struct module *owner)
65818 {
65819- struct nls_table ** tmp = &tables;
65820+ struct nls_table *tmp = tables;
65821
65822 if (nls->next)
65823 return -EBUSY;
65824
65825- nls->owner = owner;
65826+ pax_open_kernel();
65827+ *(void **)&nls->owner = owner;
65828+ pax_close_kernel();
65829 spin_lock(&nls_lock);
65830- while (*tmp) {
65831- if (nls == *tmp) {
65832+ while (tmp) {
65833+ if (nls == tmp) {
65834 spin_unlock(&nls_lock);
65835 return -EBUSY;
65836 }
65837- tmp = &(*tmp)->next;
65838+ tmp = tmp->next;
65839 }
65840- nls->next = tables;
65841+ pax_open_kernel();
65842+ *(struct nls_table **)&nls->next = tables;
65843+ pax_close_kernel();
65844 tables = nls;
65845 spin_unlock(&nls_lock);
65846 return 0;
65847@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
65848
65849 int unregister_nls(struct nls_table * nls)
65850 {
65851- struct nls_table ** tmp = &tables;
65852+ struct nls_table * const * tmp = &tables;
65853
65854 spin_lock(&nls_lock);
65855 while (*tmp) {
65856 if (nls == *tmp) {
65857- *tmp = nls->next;
65858+ pax_open_kernel();
65859+ *(struct nls_table **)tmp = nls->next;
65860+ pax_close_kernel();
65861 spin_unlock(&nls_lock);
65862 return 0;
65863 }
65864@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
65865 return -EINVAL;
65866 }
65867
65868-static struct nls_table *find_nls(char *charset)
65869+static struct nls_table *find_nls(const char *charset)
65870 {
65871 struct nls_table *nls;
65872 spin_lock(&nls_lock);
65873@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
65874 return nls;
65875 }
65876
65877-struct nls_table *load_nls(char *charset)
65878+struct nls_table *load_nls(const char *charset)
65879 {
65880 return try_then_request_module(find_nls(charset), "nls_%s", charset);
65881 }
65882diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
65883index 162b3f1..6076a7c 100644
65884--- a/fs/nls/nls_euc-jp.c
65885+++ b/fs/nls/nls_euc-jp.c
65886@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
65887 p_nls = load_nls("cp932");
65888
65889 if (p_nls) {
65890- table.charset2upper = p_nls->charset2upper;
65891- table.charset2lower = p_nls->charset2lower;
65892+ pax_open_kernel();
65893+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65894+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65895+ pax_close_kernel();
65896 return register_nls(&table);
65897 }
65898
65899diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
65900index a80a741..7b96e1b 100644
65901--- a/fs/nls/nls_koi8-ru.c
65902+++ b/fs/nls/nls_koi8-ru.c
65903@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
65904 p_nls = load_nls("koi8-u");
65905
65906 if (p_nls) {
65907- table.charset2upper = p_nls->charset2upper;
65908- table.charset2lower = p_nls->charset2lower;
65909+ pax_open_kernel();
65910+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65911+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65912+ pax_close_kernel();
65913 return register_nls(&table);
65914 }
65915
65916diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
65917index 3fdc8a3..5888623 100644
65918--- a/fs/notify/fanotify/fanotify_user.c
65919+++ b/fs/notify/fanotify/fanotify_user.c
65920@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
65921
65922 fd = fanotify_event_metadata.fd;
65923 ret = -EFAULT;
65924- if (copy_to_user(buf, &fanotify_event_metadata,
65925- fanotify_event_metadata.event_len))
65926+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
65927+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
65928 goto out_close_fd;
65929
65930 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
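The fanotify change above is a classic copy-out bound: event_len is a length field carried inside the metadata structure itself, so it is validated against sizeof(fanotify_event_metadata) before being used as the copy_to_user() size, preventing disclosure of adjacent kernel stack should the field ever exceed the object. The general rule, as it appears in the hunk (error path simplified to a return; the patch jumps to out_close_fd):

	/* Never let a self-described length exceed the backing object. */
	if (meta.event_len > sizeof(meta) ||
	    copy_to_user(buf, &meta, meta.event_len))
		return -EFAULT;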
65931diff --git a/fs/notify/notification.c b/fs/notify/notification.c
65932index 1e58402..bb2d6f4 100644
65933--- a/fs/notify/notification.c
65934+++ b/fs/notify/notification.c
65935@@ -48,7 +48,7 @@
65936 #include <linux/fsnotify_backend.h>
65937 #include "fsnotify.h"
65938
65939-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65940+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65941
65942 /**
65943 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
65944@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65945 */
65946 u32 fsnotify_get_cookie(void)
65947 {
65948- return atomic_inc_return(&fsnotify_sync_cookie);
65949+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
65950 }
65951 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
65952
65953diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
65954index 9e38daf..5727cae 100644
65955--- a/fs/ntfs/dir.c
65956+++ b/fs/ntfs/dir.c
65957@@ -1310,7 +1310,7 @@ find_next_index_buffer:
65958 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
65959 ~(s64)(ndir->itype.index.block_size - 1)));
65960 /* Bounds checks. */
65961- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65962+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65963 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
65964 "inode 0x%lx or driver bug.", vdir->i_ino);
65965 goto err_out;
65966diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
65967index 5c9e2c8..96e4ba0 100644
65968--- a/fs/ntfs/file.c
65969+++ b/fs/ntfs/file.c
65970@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
65971 char *addr;
65972 size_t total = 0;
65973 unsigned len;
65974- int left;
65975+ unsigned left;
65976
65977 do {
65978 len = PAGE_CACHE_SIZE - ofs;
65979diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
65980index 6c3296e..c0b99f0 100644
65981--- a/fs/ntfs/super.c
65982+++ b/fs/ntfs/super.c
65983@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65984 if (!silent)
65985 ntfs_error(sb, "Primary boot sector is invalid.");
65986 } else if (!silent)
65987- ntfs_error(sb, read_err_str, "primary");
65988+ ntfs_error(sb, read_err_str, "%s", "primary");
65989 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
65990 if (bh_primary)
65991 brelse(bh_primary);
65992@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65993 goto hotfix_primary_boot_sector;
65994 brelse(bh_backup);
65995 } else if (!silent)
65996- ntfs_error(sb, read_err_str, "backup");
65997+ ntfs_error(sb, read_err_str, "%s", "backup");
65998 /* Try to read NT3.51- backup boot sector. */
65999 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
66000 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
66001@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66002 "sector.");
66003 brelse(bh_backup);
66004 } else if (!silent)
66005- ntfs_error(sb, read_err_str, "backup");
66006+ ntfs_error(sb, read_err_str, "%s", "backup");
66007 /* We failed. Cleanup and return. */
66008 if (bh_primary)
66009 brelse(bh_primary);
66010diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
66011index 0440134..d52c93a 100644
66012--- a/fs/ocfs2/localalloc.c
66013+++ b/fs/ocfs2/localalloc.c
66014@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
66015 goto bail;
66016 }
66017
66018- atomic_inc(&osb->alloc_stats.moves);
66019+ atomic_inc_unchecked(&osb->alloc_stats.moves);
66020
66021 bail:
66022 if (handle)
66023diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
66024index bbec539..7b266d5 100644
66025--- a/fs/ocfs2/ocfs2.h
66026+++ b/fs/ocfs2/ocfs2.h
66027@@ -236,11 +236,11 @@ enum ocfs2_vol_state
66028
66029 struct ocfs2_alloc_stats
66030 {
66031- atomic_t moves;
66032- atomic_t local_data;
66033- atomic_t bitmap_data;
66034- atomic_t bg_allocs;
66035- atomic_t bg_extends;
66036+ atomic_unchecked_t moves;
66037+ atomic_unchecked_t local_data;
66038+ atomic_unchecked_t bitmap_data;
66039+ atomic_unchecked_t bg_allocs;
66040+ atomic_unchecked_t bg_extends;
66041 };
66042
66043 enum ocfs2_local_alloc_state
66044diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
66045index 0cb889a..6a26b24 100644
66046--- a/fs/ocfs2/suballoc.c
66047+++ b/fs/ocfs2/suballoc.c
66048@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
66049 mlog_errno(status);
66050 goto bail;
66051 }
66052- atomic_inc(&osb->alloc_stats.bg_extends);
66053+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
66054
66055 /* You should never ask for this much metadata */
66056 BUG_ON(bits_wanted >
66057@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
66058 mlog_errno(status);
66059 goto bail;
66060 }
66061- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66062+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66063
66064 *suballoc_loc = res.sr_bg_blkno;
66065 *suballoc_bit_start = res.sr_bit_offset;
66066@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
66067 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
66068 res->sr_bits);
66069
66070- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66071+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66072
66073 BUG_ON(res->sr_bits != 1);
66074
66075@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
66076 mlog_errno(status);
66077 goto bail;
66078 }
66079- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66080+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66081
66082 BUG_ON(res.sr_bits != 1);
66083
66084@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66085 cluster_start,
66086 num_clusters);
66087 if (!status)
66088- atomic_inc(&osb->alloc_stats.local_data);
66089+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
66090 } else {
66091 if (min_clusters > (osb->bitmap_cpg - 1)) {
66092 /* The only paths asking for contiguousness
66093@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66094 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
66095 res.sr_bg_blkno,
66096 res.sr_bit_offset);
66097- atomic_inc(&osb->alloc_stats.bitmap_data);
66098+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
66099 *num_clusters = res.sr_bits;
66100 }
66101 }
66102diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
66103index ddb662b..f701c83 100644
66104--- a/fs/ocfs2/super.c
66105+++ b/fs/ocfs2/super.c
66106@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
66107 "%10s => GlobalAllocs: %d LocalAllocs: %d "
66108 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
66109 "Stats",
66110- atomic_read(&osb->alloc_stats.bitmap_data),
66111- atomic_read(&osb->alloc_stats.local_data),
66112- atomic_read(&osb->alloc_stats.bg_allocs),
66113- atomic_read(&osb->alloc_stats.moves),
66114- atomic_read(&osb->alloc_stats.bg_extends));
66115+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
66116+ atomic_read_unchecked(&osb->alloc_stats.local_data),
66117+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
66118+ atomic_read_unchecked(&osb->alloc_stats.moves),
66119+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
66120
66121 out += snprintf(buf + out, len - out,
66122 "%10s => State: %u Descriptor: %llu Size: %u bits "
66123@@ -2100,11 +2100,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
66124
66125 mutex_init(&osb->system_file_mutex);
66126
66127- atomic_set(&osb->alloc_stats.moves, 0);
66128- atomic_set(&osb->alloc_stats.local_data, 0);
66129- atomic_set(&osb->alloc_stats.bitmap_data, 0);
66130- atomic_set(&osb->alloc_stats.bg_allocs, 0);
66131- atomic_set(&osb->alloc_stats.bg_extends, 0);
66132+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
66133+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
66134+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
66135+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
66136+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
66137
66138 /* Copy the blockcheck stats from the superblock probe */
66139 osb->osb_ecc_stats = *stats;
66140diff --git a/fs/open.c b/fs/open.c
66141index d6fd3ac..6ccf474 100644
66142--- a/fs/open.c
66143+++ b/fs/open.c
66144@@ -32,6 +32,8 @@
66145 #include <linux/dnotify.h>
66146 #include <linux/compat.h>
66147
66148+#define CREATE_TRACE_POINTS
66149+#include <trace/events/fs.h>
66150 #include "internal.h"
66151
66152 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
66153@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
66154 error = locks_verify_truncate(inode, NULL, length);
66155 if (!error)
66156 error = security_path_truncate(path);
66157+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
66158+ error = -EACCES;
66159 if (!error)
66160 error = do_truncate(path->dentry, length, 0, NULL);
66161
66162@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
66163 error = locks_verify_truncate(inode, f.file, length);
66164 if (!error)
66165 error = security_path_truncate(&f.file->f_path);
66166+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
66167+ error = -EACCES;
66168 if (!error)
66169 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
66170 sb_end_write(inode->i_sb);
66171@@ -380,6 +386,9 @@ retry:
66172 if (__mnt_is_readonly(path.mnt))
66173 res = -EROFS;
66174
66175+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
66176+ res = -EACCES;
66177+
66178 out_path_release:
66179 path_put(&path);
66180 if (retry_estale(res, lookup_flags)) {
66181@@ -411,6 +420,8 @@ retry:
66182 if (error)
66183 goto dput_and_out;
66184
66185+ gr_log_chdir(path.dentry, path.mnt);
66186+
66187 set_fs_pwd(current->fs, &path);
66188
66189 dput_and_out:
66190@@ -440,6 +451,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
66191 goto out_putf;
66192
66193 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
66194+
66195+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
66196+ error = -EPERM;
66197+
66198+ if (!error)
66199+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
66200+
66201 if (!error)
66202 set_fs_pwd(current->fs, &f.file->f_path);
66203 out_putf:
66204@@ -469,7 +487,13 @@ retry:
66205 if (error)
66206 goto dput_and_out;
66207
66208+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
66209+ goto dput_and_out;
66210+
66211 set_fs_root(current->fs, &path);
66212+
66213+ gr_handle_chroot_chdir(&path);
66214+
66215 error = 0;
66216 dput_and_out:
66217 path_put(&path);
66218@@ -493,6 +517,16 @@ static int chmod_common(struct path *path, umode_t mode)
66219 return error;
66220 retry_deleg:
66221 mutex_lock(&inode->i_mutex);
66222+
66223+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
66224+ error = -EACCES;
66225+ goto out_unlock;
66226+ }
66227+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
66228+ error = -EACCES;
66229+ goto out_unlock;
66230+ }
66231+
66232 error = security_path_chmod(path, mode);
66233 if (error)
66234 goto out_unlock;
66235@@ -558,6 +592,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
66236 uid = make_kuid(current_user_ns(), user);
66237 gid = make_kgid(current_user_ns(), group);
66238
66239+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
66240+ return -EACCES;
66241+
66242 newattrs.ia_valid = ATTR_CTIME;
66243 if (user != (uid_t) -1) {
66244 if (!uid_valid(uid))
66245@@ -983,6 +1020,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
66246 } else {
66247 fsnotify_open(f);
66248 fd_install(fd, f);
66249+ trace_do_sys_open(tmp->name, flags, mode);
66250 }
66251 }
66252 putname(tmp);
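
Every gr_acl_handle_*() call added in this file follows one convention: the hook returns nonzero to allow and 0 to deny, and the caller maps a denial to -EACCES only after the regular permission and LSM checks have passed (the gr_handle_*() helpers, such as gr_handle_chroot_chmod() above, use the opposite sense: nonzero means deny). A hypothetical skeleton under that assumption; gr_acl_is_enabled() and subject_allows() are illustrative names, not the patch's:

static int gr_acl_handle_example(const struct dentry *dentry,
				 const struct vfsmount *mnt)
{
	/* With the RBAC system disabled, allow everything (assumption). */
	if (!gr_acl_is_enabled())
		return 1;
	/* Otherwise consult the subject's rules; 0 means deny. */
	return subject_allows(dentry, mnt);
}
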
66253diff --git a/fs/pipe.c b/fs/pipe.c
66254index 21981e5..3d5f55c 100644
66255--- a/fs/pipe.c
66256+++ b/fs/pipe.c
66257@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
66258
66259 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
66260 {
66261- if (pipe->files)
66262+ if (atomic_read(&pipe->files))
66263 mutex_lock_nested(&pipe->mutex, subclass);
66264 }
66265
66266@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
66267
66268 void pipe_unlock(struct pipe_inode_info *pipe)
66269 {
66270- if (pipe->files)
66271+ if (atomic_read(&pipe->files))
66272 mutex_unlock(&pipe->mutex);
66273 }
66274 EXPORT_SYMBOL(pipe_unlock);
66275@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
66276 }
66277 if (bufs) /* More to do? */
66278 continue;
66279- if (!pipe->writers)
66280+ if (!atomic_read(&pipe->writers))
66281 break;
66282- if (!pipe->waiting_writers) {
66283+ if (!atomic_read(&pipe->waiting_writers)) {
66284 /* syscall merging: Usually we must not sleep
66285 * if O_NONBLOCK is set, or if we got some data.
66286 * But if a writer sleeps in kernel space, then
66287@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66288
66289 __pipe_lock(pipe);
66290
66291- if (!pipe->readers) {
66292+ if (!atomic_read(&pipe->readers)) {
66293 send_sig(SIGPIPE, current, 0);
66294 ret = -EPIPE;
66295 goto out;
66296@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66297 for (;;) {
66298 int bufs;
66299
66300- if (!pipe->readers) {
66301+ if (!atomic_read(&pipe->readers)) {
66302 send_sig(SIGPIPE, current, 0);
66303 if (!ret)
66304 ret = -EPIPE;
66305@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66306 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66307 do_wakeup = 0;
66308 }
66309- pipe->waiting_writers++;
66310+ atomic_inc(&pipe->waiting_writers);
66311 pipe_wait(pipe);
66312- pipe->waiting_writers--;
66313+ atomic_dec(&pipe->waiting_writers);
66314 }
66315 out:
66316 __pipe_unlock(pipe);
66317@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66318 mask = 0;
66319 if (filp->f_mode & FMODE_READ) {
66320 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
66321- if (!pipe->writers && filp->f_version != pipe->w_counter)
66322+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
66323 mask |= POLLHUP;
66324 }
66325
66326@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66327 * Most Unices do not set POLLERR for FIFOs but on Linux they
66328 * behave exactly like pipes for poll().
66329 */
66330- if (!pipe->readers)
66331+ if (!atomic_read(&pipe->readers))
66332 mask |= POLLERR;
66333 }
66334
66335@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
66336 int kill = 0;
66337
66338 spin_lock(&inode->i_lock);
66339- if (!--pipe->files) {
66340+ if (atomic_dec_and_test(&pipe->files)) {
66341 inode->i_pipe = NULL;
66342 kill = 1;
66343 }
66344@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
66345
66346 __pipe_lock(pipe);
66347 if (file->f_mode & FMODE_READ)
66348- pipe->readers--;
66349+ atomic_dec(&pipe->readers);
66350 if (file->f_mode & FMODE_WRITE)
66351- pipe->writers--;
66352+ atomic_dec(&pipe->writers);
66353
66354- if (pipe->readers || pipe->writers) {
66355+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
66356 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
66357 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66358 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
66359@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
66360 kfree(pipe);
66361 }
66362
66363-static struct vfsmount *pipe_mnt __read_mostly;
66364+struct vfsmount *pipe_mnt __read_mostly;
66365
66366 /*
66367 * pipefs_dname() is called from d_path().
66368@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
66369 goto fail_iput;
66370
66371 inode->i_pipe = pipe;
66372- pipe->files = 2;
66373- pipe->readers = pipe->writers = 1;
66374+ atomic_set(&pipe->files, 2);
66375+ atomic_set(&pipe->readers, 1);
66376+ atomic_set(&pipe->writers, 1);
66377 inode->i_fop = &pipefifo_fops;
66378
66379 /*
66380@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
66381 spin_lock(&inode->i_lock);
66382 if (inode->i_pipe) {
66383 pipe = inode->i_pipe;
66384- pipe->files++;
66385+ atomic_inc(&pipe->files);
66386 spin_unlock(&inode->i_lock);
66387 } else {
66388 spin_unlock(&inode->i_lock);
66389 pipe = alloc_pipe_info();
66390 if (!pipe)
66391 return -ENOMEM;
66392- pipe->files = 1;
66393+ atomic_set(&pipe->files, 1);
66394 spin_lock(&inode->i_lock);
66395 if (unlikely(inode->i_pipe)) {
66396- inode->i_pipe->files++;
66397+ atomic_inc(&inode->i_pipe->files);
66398 spin_unlock(&inode->i_lock);
66399 free_pipe_info(pipe);
66400 pipe = inode->i_pipe;
66401@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
66402 * opened, even when there is no process writing the FIFO.
66403 */
66404 pipe->r_counter++;
66405- if (pipe->readers++ == 0)
66406+ if (atomic_inc_return(&pipe->readers) == 1)
66407 wake_up_partner(pipe);
66408
66409- if (!is_pipe && !pipe->writers) {
66410+ if (!is_pipe && !atomic_read(&pipe->writers)) {
66411 if ((filp->f_flags & O_NONBLOCK)) {
66412 /* suppress POLLHUP until we have
66413 * seen a writer */
66414@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
66415 * errno=ENXIO when there is no process reading the FIFO.
66416 */
66417 ret = -ENXIO;
66418- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
66419+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
66420 goto err;
66421
66422 pipe->w_counter++;
66423- if (!pipe->writers++)
66424+ if (atomic_inc_return(&pipe->writers) == 1)
66425 wake_up_partner(pipe);
66426
66427- if (!is_pipe && !pipe->readers) {
66428+ if (!is_pipe && !atomic_read(&pipe->readers)) {
66429 if (wait_for_partner(pipe, &pipe->r_counter))
66430 goto err_wr;
66431 }
66432@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
66433 * the process can at least talk to itself.
66434 */
66435
66436- pipe->readers++;
66437- pipe->writers++;
66438+ atomic_inc(&pipe->readers);
66439+ atomic_inc(&pipe->writers);
66440 pipe->r_counter++;
66441 pipe->w_counter++;
66442- if (pipe->readers == 1 || pipe->writers == 1)
66443+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
66444 wake_up_partner(pipe);
66445 break;
66446
66447@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
66448 return 0;
66449
66450 err_rd:
66451- if (!--pipe->readers)
66452+ if (atomic_dec_and_test(&pipe->readers))
66453 wake_up_interruptible(&pipe->wait);
66454 ret = -ERESTARTSYS;
66455 goto err;
66456
66457 err_wr:
66458- if (!--pipe->writers)
66459+ if (atomic_dec_and_test(&pipe->writers))
66460 wake_up_interruptible(&pipe->wait);
66461 ret = -ERESTARTSYS;
66462 goto err;
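
The conversions above are mechanical; the equivalences they rely on, summarized:

/*
 * old: if (pipe->readers++ == 0)  new: if (atomic_inc_return(&pipe->readers) == 1)
 * old: if (!pipe->writers++)      new: if (atomic_inc_return(&pipe->writers) == 1)
 * old: if (!--pipe->readers)      new: if (atomic_dec_and_test(&pipe->readers))
 * old: if (!--pipe->files)        new: if (atomic_dec_and_test(&pipe->files))
 *
 * atomic_inc_return() yields the post-increment value, so a test of
 * the pre-increment value against 0 becomes a test against 1, and
 * atomic_dec_and_test() returns true exactly when the result hits 0.
 */
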
66463diff --git a/fs/posix_acl.c b/fs/posix_acl.c
66464index 0855f77..6787d50 100644
66465--- a/fs/posix_acl.c
66466+++ b/fs/posix_acl.c
66467@@ -20,6 +20,7 @@
66468 #include <linux/xattr.h>
66469 #include <linux/export.h>
66470 #include <linux/user_namespace.h>
66471+#include <linux/grsecurity.h>
66472
66473 struct posix_acl **acl_by_type(struct inode *inode, int type)
66474 {
66475@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
66476 }
66477 }
66478 if (mode_p)
66479- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66480+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66481 return not_equiv;
66482 }
66483 EXPORT_SYMBOL(posix_acl_equiv_mode);
66484@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
66485 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
66486 }
66487
66488- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66489+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66490 return not_equiv;
66491 }
66492
66493@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
66494 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
66495 int err = -ENOMEM;
66496 if (clone) {
66497+ *mode_p &= ~gr_acl_umask();
66498+
66499 err = posix_acl_create_masq(clone, mode_p);
66500 if (err < 0) {
66501 posix_acl_release(clone);
66502@@ -659,11 +662,12 @@ struct posix_acl *
66503 posix_acl_from_xattr(struct user_namespace *user_ns,
66504 const void *value, size_t size)
66505 {
66506- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
66507- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
66508+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
66509+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
66510 int count;
66511 struct posix_acl *acl;
66512 struct posix_acl_entry *acl_e;
66513+ umode_t umask = gr_acl_umask();
66514
66515 if (!value)
66516 return NULL;
66517@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66518
66519 switch(acl_e->e_tag) {
66520 case ACL_USER_OBJ:
66521+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66522+ break;
66523 case ACL_GROUP_OBJ:
66524 case ACL_MASK:
66525+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66526+ break;
66527 case ACL_OTHER:
66528+ acl_e->e_perm &= ~(umask & S_IRWXO);
66529 break;
66530
66531 case ACL_USER:
66532+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66533 acl_e->e_uid =
66534 make_kuid(user_ns,
66535 le32_to_cpu(entry->e_id));
66536@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66537 goto fail;
66538 break;
66539 case ACL_GROUP:
66540+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66541 acl_e->e_gid =
66542 make_kgid(user_ns,
66543 le32_to_cpu(entry->e_id));
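
The per-tag masking added above packs gr_acl_umask() bits the way mode_t does: owner bits at offset 6, group at offset 3, other at offset 0, while an ACL entry's e_perm holds plain rwx bits. A self-contained restatement of that arithmetic (the tag_class values stand in for the kernel's ACL_* tags):

#include <sys/stat.h>

/* Strip the matching third of the umask from a 3-bit rwx e_perm. */
static unsigned short apply_acl_umask(unsigned short e_perm,
				      int tag_class, mode_t umask_bits)
{
	switch (tag_class) {
	case 0: /* user entries: ACL_USER_OBJ, ACL_USER */
		return e_perm & ~((umask_bits & S_IRWXU) >> 6);
	case 1: /* group entries: ACL_GROUP_OBJ, ACL_GROUP, ACL_MASK */
		return e_perm & ~((umask_bits & S_IRWXG) >> 3);
	default: /* ACL_OTHER */
		return e_perm & ~(umask_bits & S_IRWXO);
	}
}
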
66544diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
66545index 2183fcf..3c32a98 100644
66546--- a/fs/proc/Kconfig
66547+++ b/fs/proc/Kconfig
66548@@ -30,7 +30,7 @@ config PROC_FS
66549
66550 config PROC_KCORE
66551 bool "/proc/kcore support" if !ARM
66552- depends on PROC_FS && MMU
66553+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
66554 help
66555 Provides a virtual ELF core file of the live kernel. This can
66556 be read with gdb and other ELF tools. No modifications can be
66557@@ -38,8 +38,8 @@ config PROC_KCORE
66558
66559 config PROC_VMCORE
66560 bool "/proc/vmcore support"
66561- depends on PROC_FS && CRASH_DUMP
66562- default y
66563+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
66564+ default n
66565 help
66566 Exports the dump image of crashed kernel in ELF format.
66567
66568@@ -63,8 +63,8 @@ config PROC_SYSCTL
66569 limited in memory.
66570
66571 config PROC_PAGE_MONITOR
66572- default y
66573- depends on PROC_FS && MMU
66574+ default n
66575+ depends on PROC_FS && MMU && !GRKERNSEC
66576 bool "Enable /proc page monitoring" if EXPERT
66577 help
66578 Various /proc files exist to monitor process memory utilization:
66579diff --git a/fs/proc/array.c b/fs/proc/array.c
66580index 64db2bc..a8185d6 100644
66581--- a/fs/proc/array.c
66582+++ b/fs/proc/array.c
66583@@ -60,6 +60,7 @@
66584 #include <linux/tty.h>
66585 #include <linux/string.h>
66586 #include <linux/mman.h>
66587+#include <linux/grsecurity.h>
66588 #include <linux/proc_fs.h>
66589 #include <linux/ioport.h>
66590 #include <linux/uaccess.h>
66591@@ -356,6 +357,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
66592 seq_putc(m, '\n');
66593 }
66594
66595+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66596+static inline void task_pax(struct seq_file *m, struct task_struct *p)
66597+{
66598+ if (p->mm)
66599+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
66600+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
66601+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
66602+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
66603+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
66604+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
66605+ else
66606+ seq_printf(m, "PaX:\t-----\n");
66607+}
66608+#endif
66609+
66610 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66611 struct pid *pid, struct task_struct *task)
66612 {
66613@@ -374,9 +390,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66614 task_cpus_allowed(m, task);
66615 cpuset_task_status_allowed(m, task);
66616 task_context_switch_counts(m, task);
66617+
66618+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66619+ task_pax(m, task);
66620+#endif
66621+
66622+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
66623+ task_grsec_rbac(m, task);
66624+#endif
66625+
66626 return 0;
66627 }
66628
66629+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66630+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66631+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66632+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66633+#endif
66634+
66635 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66636 struct pid *pid, struct task_struct *task, int whole)
66637 {
66638@@ -398,6 +429,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66639 char tcomm[sizeof(task->comm)];
66640 unsigned long flags;
66641
66642+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66643+ if (current->exec_id != m->exec_id) {
66644+ gr_log_badprocpid("stat");
66645+ return 0;
66646+ }
66647+#endif
66648+
66649 state = *get_task_state(task);
66650 vsize = eip = esp = 0;
66651 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66652@@ -468,6 +506,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66653 gtime = task_gtime(task);
66654 }
66655
66656+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66657+ if (PAX_RAND_FLAGS(mm)) {
66658+ eip = 0;
66659+ esp = 0;
66660+ wchan = 0;
66661+ }
66662+#endif
66663+#ifdef CONFIG_GRKERNSEC_HIDESYM
66664+ wchan = 0;
66665+ eip = 0;
66666+ esp = 0;
66667+#endif
66668+
66669 /* scale priority and nice values from timeslices to -20..20 */
66670 /* to make it look like a "normal" Unix priority/nice value */
66671 priority = task_prio(task);
66672@@ -504,9 +555,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66673 seq_put_decimal_ull(m, ' ', vsize);
66674 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
66675 seq_put_decimal_ull(m, ' ', rsslim);
66676+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66677+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
66678+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
66679+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
66680+#else
66681 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
66682 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
66683 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
66684+#endif
66685 seq_put_decimal_ull(m, ' ', esp);
66686 seq_put_decimal_ull(m, ' ', eip);
66687 /* The signal information here is obsolete.
66688@@ -528,7 +585,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66689 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
66690 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
66691
66692- if (mm && permitted) {
66693+ if (mm && permitted
66694+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66695+ && !PAX_RAND_FLAGS(mm)
66696+#endif
66697+ ) {
66698 seq_put_decimal_ull(m, ' ', mm->start_data);
66699 seq_put_decimal_ull(m, ' ', mm->end_data);
66700 seq_put_decimal_ull(m, ' ', mm->start_brk);
66701@@ -566,8 +627,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66702 struct pid *pid, struct task_struct *task)
66703 {
66704 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
66705- struct mm_struct *mm = get_task_mm(task);
66706+ struct mm_struct *mm;
66707
66708+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66709+ if (current->exec_id != m->exec_id) {
66710+ gr_log_badprocpid("statm");
66711+ return 0;
66712+ }
66713+#endif
66714+ mm = get_task_mm(task);
66715 if (mm) {
66716 size = task_statm(mm, &shared, &text, &data, &resident);
66717 mmput(mm);
66718@@ -590,6 +658,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66719 return 0;
66720 }
66721
66722+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66723+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
66724+{
66725+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
66726+}
66727+#endif
66728+
66729 #ifdef CONFIG_CHECKPOINT_RESTORE
66730 static struct pid *
66731 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
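
The PAX_RAND_FLAGS() macro above (repeated verbatim in base.c below) reads more clearly as a single mask test; an equivalent restatement:

/* Hide layout details of any mm other than the reader's own whenever
 * that mm has randomized mmap or SEGMEXEC active. */
#define PAX_RAND_FLAGS_EQUIV(_mm)					\
	((_mm) != NULL && (_mm) != current->mm &&			\
	 ((_mm)->pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC)))
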
66732diff --git a/fs/proc/base.c b/fs/proc/base.c
66733index 2d696b0..b9da447 100644
66734--- a/fs/proc/base.c
66735+++ b/fs/proc/base.c
66736@@ -113,6 +113,14 @@ struct pid_entry {
66737 union proc_op op;
66738 };
66739
66740+struct getdents_callback {
66741+ struct linux_dirent __user * current_dir;
66742+ struct linux_dirent __user * previous;
66743+ struct file * file;
66744+ int count;
66745+ int error;
66746+};
66747+
66748 #define NOD(NAME, MODE, IOP, FOP, OP) { \
66749 .name = (NAME), \
66750 .len = sizeof(NAME) - 1, \
66751@@ -205,12 +213,28 @@ static int proc_pid_cmdline(struct task_struct *task, char *buffer)
66752 return get_cmdline(task, buffer, PAGE_SIZE);
66753 }
66754
66755+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66756+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66757+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66758+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66759+#endif
66760+
66761 static int proc_pid_auxv(struct task_struct *task, char *buffer)
66762 {
66763 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
66764 int res = PTR_ERR(mm);
66765 if (mm && !IS_ERR(mm)) {
66766 unsigned int nwords = 0;
66767+
66768+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66769+ /* allow if we're currently ptracing this task */
66770+ if (PAX_RAND_FLAGS(mm) &&
66771+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
66772+ mmput(mm);
66773+ return 0;
66774+ }
66775+#endif
66776+
66777 do {
66778 nwords += 2;
66779 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
66780@@ -224,7 +248,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
66781 }
66782
66783
66784-#ifdef CONFIG_KALLSYMS
66785+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66786 /*
66787 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
66788 * Returns the resolved symbol. If that fails, simply return the address.
66789@@ -263,7 +287,7 @@ static void unlock_trace(struct task_struct *task)
66790 mutex_unlock(&task->signal->cred_guard_mutex);
66791 }
66792
66793-#ifdef CONFIG_STACKTRACE
66794+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66795
66796 #define MAX_STACK_TRACE_DEPTH 64
66797
66798@@ -486,7 +510,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
66799 return count;
66800 }
66801
66802-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66803+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66804 static int proc_pid_syscall(struct task_struct *task, char *buffer)
66805 {
66806 long nr;
66807@@ -515,7 +539,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
66808 /************************************************************************/
66809
66810 /* permission checks */
66811-static int proc_fd_access_allowed(struct inode *inode)
66812+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
66813 {
66814 struct task_struct *task;
66815 int allowed = 0;
66816@@ -525,7 +549,10 @@ static int proc_fd_access_allowed(struct inode *inode)
66817 */
66818 task = get_proc_task(inode);
66819 if (task) {
66820- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66821+ if (log)
66822+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66823+ else
66824+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66825 put_task_struct(task);
66826 }
66827 return allowed;
66828@@ -556,10 +583,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
66829 struct task_struct *task,
66830 int hide_pid_min)
66831 {
66832+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66833+ return false;
66834+
66835+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66836+ rcu_read_lock();
66837+ {
66838+ const struct cred *tmpcred = current_cred();
66839+ const struct cred *cred = __task_cred(task);
66840+
66841+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
66842+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66843+ || in_group_p(grsec_proc_gid)
66844+#endif
66845+ ) {
66846+ rcu_read_unlock();
66847+ return true;
66848+ }
66849+ }
66850+ rcu_read_unlock();
66851+
66852+ if (!pid->hide_pid)
66853+ return false;
66854+#endif
66855+
66856 if (pid->hide_pid < hide_pid_min)
66857 return true;
66858 if (in_group_p(pid->pid_gid))
66859 return true;
66860+
66861 return ptrace_may_access(task, PTRACE_MODE_READ);
66862 }
66863
66864@@ -577,7 +629,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
66865 put_task_struct(task);
66866
66867 if (!has_perms) {
66868+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66869+ {
66870+#else
66871 if (pid->hide_pid == 2) {
66872+#endif
66873 /*
66874 * Let's make getdents(), stat(), and open()
66875 * consistent with each other. If a process
66876@@ -675,6 +731,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66877 if (!task)
66878 return -ESRCH;
66879
66880+ if (gr_acl_handle_procpidmem(task)) {
66881+ put_task_struct(task);
66882+ return -EPERM;
66883+ }
66884+
66885 mm = mm_access(task, mode);
66886 put_task_struct(task);
66887
66888@@ -690,6 +751,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66889
66890 file->private_data = mm;
66891
66892+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66893+ file->f_version = current->exec_id;
66894+#endif
66895+
66896 return 0;
66897 }
66898
66899@@ -711,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66900 ssize_t copied;
66901 char *page;
66902
66903+#ifdef CONFIG_GRKERNSEC
66904+ if (write)
66905+ return -EPERM;
66906+#endif
66907+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66908+ if (file->f_version != current->exec_id) {
66909+ gr_log_badprocpid("mem");
66910+ return 0;
66911+ }
66912+#endif
66913+
66914 if (!mm)
66915 return 0;
66916
66917@@ -723,7 +799,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66918 goto free;
66919
66920 while (count > 0) {
66921- int this_len = min_t(int, count, PAGE_SIZE);
66922+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
66923
66924 if (write && copy_from_user(page, buf, this_len)) {
66925 copied = -EFAULT;
66926@@ -815,6 +891,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66927 if (!mm)
66928 return 0;
66929
66930+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66931+ if (file->f_version != current->exec_id) {
66932+ gr_log_badprocpid("environ");
66933+ return 0;
66934+ }
66935+#endif
66936+
66937 page = (char *)__get_free_page(GFP_TEMPORARY);
66938 if (!page)
66939 return -ENOMEM;
66940@@ -824,7 +907,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66941 goto free;
66942 while (count > 0) {
66943 size_t this_len, max_len;
66944- int retval;
66945+ ssize_t retval;
66946
66947 if (src >= (mm->env_end - mm->env_start))
66948 break;
66949@@ -1438,7 +1521,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
66950 int error = -EACCES;
66951
66952 /* Are we allowed to snoop on the tasks file descriptors? */
66953- if (!proc_fd_access_allowed(inode))
66954+ if (!proc_fd_access_allowed(inode, 0))
66955 goto out;
66956
66957 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66958@@ -1482,8 +1565,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
66959 struct path path;
66960
66961 /* Are we allowed to snoop on the tasks file descriptors? */
66962- if (!proc_fd_access_allowed(inode))
66963- goto out;
66964+ /* Logging this is needed for learning on Chromium to work properly,
66965+ but we don't want to flood the logs from 'ps', which does a readlink
66966+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
66967+ CAP_SYS_PTRACE, as it's not necessary for its basic functionality.
66968+ */
66969+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
66970+ if (!proc_fd_access_allowed(inode, 0))
66971+ goto out;
66972+ } else {
66973+ if (!proc_fd_access_allowed(inode, 1))
66974+ goto out;
66975+ }
66976
66977 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66978 if (error)
66979@@ -1533,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
66980 rcu_read_lock();
66981 cred = __task_cred(task);
66982 inode->i_uid = cred->euid;
66983+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66984+ inode->i_gid = grsec_proc_gid;
66985+#else
66986 inode->i_gid = cred->egid;
66987+#endif
66988 rcu_read_unlock();
66989 }
66990 security_task_to_inode(task, inode);
66991@@ -1569,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
66992 return -ENOENT;
66993 }
66994 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66995+#ifdef CONFIG_GRKERNSEC_PROC_USER
66996+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66997+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66998+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66999+#endif
67000 task_dumpable(task)) {
67001 cred = __task_cred(task);
67002 stat->uid = cred->euid;
67003+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67004+ stat->gid = grsec_proc_gid;
67005+#else
67006 stat->gid = cred->egid;
67007+#endif
67008 }
67009 }
67010 rcu_read_unlock();
67011@@ -1610,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
67012
67013 if (task) {
67014 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67015+#ifdef CONFIG_GRKERNSEC_PROC_USER
67016+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67017+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67018+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67019+#endif
67020 task_dumpable(task)) {
67021 rcu_read_lock();
67022 cred = __task_cred(task);
67023 inode->i_uid = cred->euid;
67024+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67025+ inode->i_gid = grsec_proc_gid;
67026+#else
67027 inode->i_gid = cred->egid;
67028+#endif
67029 rcu_read_unlock();
67030 } else {
67031 inode->i_uid = GLOBAL_ROOT_UID;
67032@@ -2149,6 +2264,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
67033 if (!task)
67034 goto out_no_task;
67035
67036+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67037+ goto out;
67038+
67039 /*
67040 * Yes, it does not scale. And it should not. Don't add
67041 * new entries into /proc/<tgid>/ without very good reasons.
67042@@ -2179,6 +2297,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
67043 if (!task)
67044 return -ENOENT;
67045
67046+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67047+ goto out;
67048+
67049 if (!dir_emit_dots(file, ctx))
67050 goto out;
67051
67052@@ -2568,7 +2689,7 @@ static const struct pid_entry tgid_base_stuff[] = {
67053 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
67054 #endif
67055 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67056-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67057+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67058 INF("syscall", S_IRUSR, proc_pid_syscall),
67059 #endif
67060 INF("cmdline", S_IRUGO, proc_pid_cmdline),
67061@@ -2593,10 +2714,10 @@ static const struct pid_entry tgid_base_stuff[] = {
67062 #ifdef CONFIG_SECURITY
67063 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67064 #endif
67065-#ifdef CONFIG_KALLSYMS
67066+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67067 INF("wchan", S_IRUGO, proc_pid_wchan),
67068 #endif
67069-#ifdef CONFIG_STACKTRACE
67070+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67071 ONE("stack", S_IRUSR, proc_pid_stack),
67072 #endif
67073 #ifdef CONFIG_SCHEDSTATS
67074@@ -2630,6 +2751,9 @@ static const struct pid_entry tgid_base_stuff[] = {
67075 #ifdef CONFIG_HARDWALL
67076 INF("hardwall", S_IRUGO, proc_pid_hardwall),
67077 #endif
67078+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67079+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
67080+#endif
67081 #ifdef CONFIG_USER_NS
67082 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
67083 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
67084@@ -2760,7 +2884,14 @@ static int proc_pid_instantiate(struct inode *dir,
67085 if (!inode)
67086 goto out;
67087
67088+#ifdef CONFIG_GRKERNSEC_PROC_USER
67089+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
67090+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67091+ inode->i_gid = grsec_proc_gid;
67092+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
67093+#else
67094 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
67095+#endif
67096 inode->i_op = &proc_tgid_base_inode_operations;
67097 inode->i_fop = &proc_tgid_base_operations;
67098 inode->i_flags|=S_IMMUTABLE;
67099@@ -2798,7 +2929,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
67100 if (!task)
67101 goto out;
67102
67103+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67104+ goto out_put_task;
67105+
67106 result = proc_pid_instantiate(dir, dentry, task, NULL);
67107+out_put_task:
67108 put_task_struct(task);
67109 out:
67110 return ERR_PTR(result);
67111@@ -2904,7 +3039,7 @@ static const struct pid_entry tid_base_stuff[] = {
67112 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
67113 #endif
67114 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67115-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67116+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67117 INF("syscall", S_IRUSR, proc_pid_syscall),
67118 #endif
67119 INF("cmdline", S_IRUGO, proc_pid_cmdline),
67120@@ -2931,10 +3066,10 @@ static const struct pid_entry tid_base_stuff[] = {
67121 #ifdef CONFIG_SECURITY
67122 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67123 #endif
67124-#ifdef CONFIG_KALLSYMS
67125+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67126 INF("wchan", S_IRUGO, proc_pid_wchan),
67127 #endif
67128-#ifdef CONFIG_STACKTRACE
67129+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67130 ONE("stack", S_IRUSR, proc_pid_stack),
67131 #endif
67132 #ifdef CONFIG_SCHEDSTATS
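
Several handlers in this file (mem, environ, and the stat/statm handlers in array.c above) gained the same CONFIG_GRKERNSEC_PROC_MEMMAP guard: the exec_id recorded when the /proc file was opened is compared against the reader's current exec_id, so a read through a descriptor that predates the reader's last execve() returns nothing and is logged. The recurring shape:

#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
	/* Descriptor opened before the last execve()?  Refuse to leak
	 * the new image's layout through it, and log the attempt. */
	if (file->f_version != current->exec_id) {
		gr_log_badprocpid("mem");
		return 0;
	}
#endif
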
67133diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
67134index cbd82df..c0407d2 100644
67135--- a/fs/proc/cmdline.c
67136+++ b/fs/proc/cmdline.c
67137@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
67138
67139 static int __init proc_cmdline_init(void)
67140 {
67141+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67142+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
67143+#else
67144 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
67145+#endif
67146 return 0;
67147 }
67148 fs_initcall(proc_cmdline_init);
67149diff --git a/fs/proc/devices.c b/fs/proc/devices.c
67150index 50493ed..248166b 100644
67151--- a/fs/proc/devices.c
67152+++ b/fs/proc/devices.c
67153@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
67154
67155 static int __init proc_devices_init(void)
67156 {
67157+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67158+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
67159+#else
67160 proc_create("devices", 0, NULL, &proc_devinfo_operations);
67161+#endif
67162 return 0;
67163 }
67164 fs_initcall(proc_devices_init);
67165diff --git a/fs/proc/fd.c b/fs/proc/fd.c
67166index 0788d09..9cc1385 100644
67167--- a/fs/proc/fd.c
67168+++ b/fs/proc/fd.c
67169@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
67170 if (!task)
67171 return -ENOENT;
67172
67173- files = get_files_struct(task);
67174+ if (!gr_acl_handle_procpidmem(task))
67175+ files = get_files_struct(task);
67176 put_task_struct(task);
67177
67178 if (files) {
67179@@ -285,11 +286,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
67180 */
67181 int proc_fd_permission(struct inode *inode, int mask)
67182 {
67183+ struct task_struct *task;
67184 int rv = generic_permission(inode, mask);
67185- if (rv == 0)
67186- return 0;
67187+
67188 if (task_tgid(current) == proc_pid(inode))
67189 rv = 0;
67190+
67191+ task = get_proc_task(inode);
67192+ if (task == NULL)
67193+ return rv;
67194+
67195+ if (gr_acl_handle_procpidmem(task))
67196+ rv = -EACCES;
67197+
67198+ put_task_struct(task);
67199+
67200 return rv;
67201 }
67202
67203diff --git a/fs/proc/generic.c b/fs/proc/generic.c
67204index b7f268e..3bea6b7 100644
67205--- a/fs/proc/generic.c
67206+++ b/fs/proc/generic.c
67207@@ -23,6 +23,7 @@
67208 #include <linux/bitops.h>
67209 #include <linux/spinlock.h>
67210 #include <linux/completion.h>
67211+#include <linux/grsecurity.h>
67212 #include <asm/uaccess.h>
67213
67214 #include "internal.h"
67215@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
67216 return proc_lookup_de(PDE(dir), dir, dentry);
67217 }
67218
67219+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
67220+ unsigned int flags)
67221+{
67222+ if (gr_proc_is_restricted())
67223+ return ERR_PTR(-EACCES);
67224+
67225+ return proc_lookup_de(PDE(dir), dir, dentry);
67226+}
67227+
67228 /*
67229 * This returns non-zero if at EOF, so that the /proc
67230 * root directory can use this and check if it should
67231@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
67232 return proc_readdir_de(PDE(inode), file, ctx);
67233 }
67234
67235+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
67236+{
67237+ struct inode *inode = file_inode(file);
67238+
67239+ if (gr_proc_is_restricted())
67240+ return -EACCES;
67241+
67242+ return proc_readdir_de(PDE(inode), file, ctx);
67243+}
67244+
67245 /*
67246 * These are the generic /proc directory operations. They
67247 * use the in-memory "struct proc_dir_entry" tree to parse
67248@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
67249 .iterate = proc_readdir,
67250 };
67251
67252+static const struct file_operations proc_dir_restricted_operations = {
67253+ .llseek = generic_file_llseek,
67254+ .read = generic_read_dir,
67255+ .iterate = proc_readdir_restrict,
67256+};
67257+
67258 /*
67259 * proc directories can do almost nothing..
67260 */
67261@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
67262 .setattr = proc_notify_change,
67263 };
67264
67265+static const struct inode_operations proc_dir_restricted_inode_operations = {
67266+ .lookup = proc_lookup_restrict,
67267+ .getattr = proc_getattr,
67268+ .setattr = proc_notify_change,
67269+};
67270+
67271 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
67272 {
67273 struct proc_dir_entry *tmp;
67274@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
67275 return ret;
67276
67277 if (S_ISDIR(dp->mode)) {
67278- dp->proc_fops = &proc_dir_operations;
67279- dp->proc_iops = &proc_dir_inode_operations;
67280+ if (dp->restricted) {
67281+ dp->proc_fops = &proc_dir_restricted_operations;
67282+ dp->proc_iops = &proc_dir_restricted_inode_operations;
67283+ } else {
67284+ dp->proc_fops = &proc_dir_operations;
67285+ dp->proc_iops = &proc_dir_inode_operations;
67286+ }
67287 dir->nlink++;
67288 } else if (S_ISLNK(dp->mode)) {
67289 dp->proc_iops = &proc_link_inode_operations;
67290@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
67291 }
67292 EXPORT_SYMBOL_GPL(proc_mkdir_data);
67293
67294+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
67295+ struct proc_dir_entry *parent, void *data)
67296+{
67297+ struct proc_dir_entry *ent;
67298+
67299+ if (mode == 0)
67300+ mode = S_IRUGO | S_IXUGO;
67301+
67302+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
67303+ if (ent) {
67304+ ent->data = data;
67305+ ent->restricted = 1;
67306+ if (proc_register(parent, ent) < 0) {
67307+ kfree(ent);
67308+ ent = NULL;
67309+ }
67310+ }
67311+ return ent;
67312+}
67313+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
67314+
67315 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
67316 struct proc_dir_entry *parent)
67317 {
67318@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
67319 }
67320 EXPORT_SYMBOL(proc_mkdir);
67321
67322+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
67323+ struct proc_dir_entry *parent)
67324+{
67325+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
67326+}
67327+EXPORT_SYMBOL(proc_mkdir_restrict);
67328+
67329 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
67330 struct proc_dir_entry *parent,
67331 const struct file_operations *proc_fops,
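
proc_mkdir_restrict() and proc_mkdir_data_restrict() above give a directory the restricted lookup/iterate operations gated by gr_proc_is_restricted(). A hypothetical caller (the entry name and parent are illustrative only):

	struct proc_dir_entry *pde;

	/* Invisible to restricted users: both lookup and readdir on the
	 * directory fail with -EACCES via the *_restrict operations. */
	pde = proc_mkdir_restrict("example", parent);
	if (!pde)
		return -ENOMEM;
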
67332diff --git a/fs/proc/inode.c b/fs/proc/inode.c
67333index 0adbc02..bee4d0b 100644
67334--- a/fs/proc/inode.c
67335+++ b/fs/proc/inode.c
67336@@ -23,11 +23,17 @@
67337 #include <linux/slab.h>
67338 #include <linux/mount.h>
67339 #include <linux/magic.h>
67340+#include <linux/grsecurity.h>
67341
67342 #include <asm/uaccess.h>
67343
67344 #include "internal.h"
67345
67346+#ifdef CONFIG_PROC_SYSCTL
67347+extern const struct inode_operations proc_sys_inode_operations;
67348+extern const struct inode_operations proc_sys_dir_operations;
67349+#endif
67350+
67351 static void proc_evict_inode(struct inode *inode)
67352 {
67353 struct proc_dir_entry *de;
67354@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
67355 ns = PROC_I(inode)->ns.ns;
67356 if (ns_ops && ns)
67357 ns_ops->put(ns);
67358+
67359+#ifdef CONFIG_PROC_SYSCTL
67360+ if (inode->i_op == &proc_sys_inode_operations ||
67361+ inode->i_op == &proc_sys_dir_operations)
67362+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
67363+#endif
67364+
67365 }
67366
67367 static struct kmem_cache * proc_inode_cachep;
67368@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
67369 if (de->mode) {
67370 inode->i_mode = de->mode;
67371 inode->i_uid = de->uid;
67372+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67373+ inode->i_gid = grsec_proc_gid;
67374+#else
67375 inode->i_gid = de->gid;
67376+#endif
67377 }
67378 if (de->size)
67379 inode->i_size = de->size;
67380diff --git a/fs/proc/internal.h b/fs/proc/internal.h
67381index 3ab6d14..b26174e 100644
67382--- a/fs/proc/internal.h
67383+++ b/fs/proc/internal.h
67384@@ -46,9 +46,10 @@ struct proc_dir_entry {
67385 struct completion *pde_unload_completion;
67386 struct list_head pde_openers; /* who did ->open, but not ->release */
67387 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
67388+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
67389 u8 namelen;
67390 char name[];
67391-};
67392+} __randomize_layout;
67393
67394 union proc_op {
67395 int (*proc_get_link)(struct dentry *, struct path *);
67396@@ -67,7 +68,7 @@ struct proc_inode {
67397 struct ctl_table *sysctl_entry;
67398 struct proc_ns ns;
67399 struct inode vfs_inode;
67400-};
67401+} __randomize_layout;
67402
67403 /*
67404 * General functions
67405@@ -155,6 +156,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
67406 struct pid *, struct task_struct *);
67407 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
67408 struct pid *, struct task_struct *);
67409+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67410+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
67411+#endif
67412
67413 /*
67414 * base.c
67415@@ -181,9 +185,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
67416 extern spinlock_t proc_subdir_lock;
67417
67418 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
67419+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
67420 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
67421 struct dentry *);
67422 extern int proc_readdir(struct file *, struct dir_context *);
67423+extern int proc_readdir_restrict(struct file *, struct dir_context *);
67424 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
67425
67426 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
67427diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
67428index a352d57..cb94a5c 100644
67429--- a/fs/proc/interrupts.c
67430+++ b/fs/proc/interrupts.c
67431@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
67432
67433 static int __init proc_interrupts_init(void)
67434 {
67435+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67436+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
67437+#else
67438 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
67439+#endif
67440 return 0;
67441 }
67442 fs_initcall(proc_interrupts_init);
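
cmdline, devices and interrupts all switch to proc_create_grsec() under CONFIG_GRKERNSEC_PROC_ADD. That helper is defined elsewhere in the patch; a plausible shape consistent with these call sites, assuming it merely applies the GRKERNSEC_PROC owner/mode policy (a sketch, not the patch's actual implementation):

static inline struct proc_dir_entry *
proc_create_grsec_sketch(const char *name, umode_t mode,
			 struct proc_dir_entry *parent,
			 const struct file_operations *fops)
{
#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
	mode = S_IRUSR | S_IRGRP;	/* root and grsec_proc_gid only */
#else
	mode = S_IRUSR;			/* root only */
#endif
	return proc_create_data(name, mode, parent, fops, NULL);
}
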
67443diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
67444index 39e6ef3..2f9cb5e 100644
67445--- a/fs/proc/kcore.c
67446+++ b/fs/proc/kcore.c
67447@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67448 * the addresses in the elf_phdr on our list.
67449 */
67450 start = kc_offset_to_vaddr(*fpos - elf_buflen);
67451- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
67452+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
67453+ if (tsz > buflen)
67454 tsz = buflen;
67455-
67456+
67457 while (buflen) {
67458 struct kcore_list *m;
67459
67460@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67461 kfree(elf_buf);
67462 } else {
67463 if (kern_addr_valid(start)) {
67464- unsigned long n;
67465+ char *elf_buf;
67466+ mm_segment_t oldfs;
67467
67468- n = copy_to_user(buffer, (char *)start, tsz);
67469- /*
67470- * We cannot distinguish between fault on source
67471- * and fault on destination. When this happens
67472- * we clear too and hope it will trigger the
67473- * EFAULT again.
67474- */
67475- if (n) {
67476- if (clear_user(buffer + tsz - n,
67477- n))
67478+ elf_buf = kmalloc(tsz, GFP_KERNEL);
67479+ if (!elf_buf)
67480+ return -ENOMEM;
67481+ oldfs = get_fs();
67482+ set_fs(KERNEL_DS);
67483+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
67484+ set_fs(oldfs);
67485+ if (copy_to_user(buffer, elf_buf, tsz)) {
67486+ kfree(elf_buf);
67487 return -EFAULT;
67488+ }
67489 }
67490+ set_fs(oldfs);
67491+ kfree(elf_buf);
67492 } else {
67493 if (clear_user(buffer, tsz))
67494 return -EFAULT;
67495@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67496
67497 static int open_kcore(struct inode *inode, struct file *filp)
67498 {
67499+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
67500+ return -EPERM;
67501+#endif
67502 if (!capable(CAP_SYS_RAWIO))
67503 return -EPERM;
67504 if (kcore_need_update)
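
The read_kcore() rewrite above replaces a direct copy_to_user() from a kernel address with a bounce buffer: the source is probed with __copy_from_user() under KERNEL_DS, so a fault on the kernel source is now distinguished from a fault on the user destination (the removed comment admitted the old code could not tell the two apart). The essential pattern, lightly condensed:

	char *bounce = kmalloc(tsz, GFP_KERNEL);
	mm_segment_t oldfs;
	unsigned long left;

	if (!bounce)
		return -ENOMEM;
	oldfs = get_fs();
	set_fs(KERNEL_DS);	/* lets *_from_user read kernel memory */
	left = __copy_from_user(bounce, (const void __user *)start, tsz);
	set_fs(oldfs);
	if (!left && copy_to_user(buffer, bounce, tsz)) {
		kfree(bounce);
		return -EFAULT;	/* only the destination faulted */
	}
	kfree(bounce);
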
67505diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
67506index 7445af0..7c5113c 100644
67507--- a/fs/proc/meminfo.c
67508+++ b/fs/proc/meminfo.c
67509@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
67510 vmi.used >> 10,
67511 vmi.largest_chunk >> 10
67512 #ifdef CONFIG_MEMORY_FAILURE
67513- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67514+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67515 #endif
67516 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
67517 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
67518diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
67519index d4a3574..b421ce9 100644
67520--- a/fs/proc/nommu.c
67521+++ b/fs/proc/nommu.c
67522@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
67523
67524 if (file) {
67525 seq_pad(m, ' ');
67526- seq_path(m, &file->f_path, "");
67527+ seq_path(m, &file->f_path, "\n\\");
67528 }
67529
67530 seq_putc(m, '\n');
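
seq_path()'s third argument is the set of bytes to escape (each is rendered as an octal \ooo sequence). Passing "\n\\" instead of the empty string, as above, keeps a crafted file name from injecting fake records into the one-line-per-region output:

	/* A mapping named "evil\nfake entry" is emitted as
	 * "evil\012fake entry", preserving the line-oriented format. */
	seq_path(m, &file->f_path, "\n\\");
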
67531diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
67532index 4677bb7..dad3045 100644
67533--- a/fs/proc/proc_net.c
67534+++ b/fs/proc/proc_net.c
67535@@ -23,9 +23,27 @@
67536 #include <linux/nsproxy.h>
67537 #include <net/net_namespace.h>
67538 #include <linux/seq_file.h>
67539+#include <linux/grsecurity.h>
67540
67541 #include "internal.h"
67542
67543+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67544+static struct seq_operations *ipv6_seq_ops_addr;
67545+
67546+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
67547+{
67548+ ipv6_seq_ops_addr = addr;
67549+}
67550+
67551+void unregister_ipv6_seq_ops_addr(void)
67552+{
67553+ ipv6_seq_ops_addr = NULL;
67554+}
67555+
67556+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
67557+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
67558+#endif
67559+
67560 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
67561 {
67562 return pde->parent->data;
67563@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
67564 return maybe_get_net(PDE_NET(PDE(inode)));
67565 }
67566
67567+extern const struct seq_operations dev_seq_ops;
67568+
67569 int seq_open_net(struct inode *ino, struct file *f,
67570 const struct seq_operations *ops, int size)
67571 {
67572@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
67573
67574 BUG_ON(size < sizeof(*p));
67575
67576+ /* only permit access to /proc/net/dev */
67577+ if (
67578+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67579+ ops != ipv6_seq_ops_addr &&
67580+#endif
67581+ ops != &dev_seq_ops && gr_proc_is_restricted())
67582+ return -EACCES;
67583+
67584 net = get_proc_net(ino);
67585 if (net == NULL)
67586 return -ENXIO;
67587@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
67588 int err;
67589 struct net *net;
67590
67591+ if (gr_proc_is_restricted())
67592+ return -EACCES;
67593+
67594 err = -ENXIO;
67595 net = get_proc_net(inode);
67596 if (net == NULL)
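
With these gates, a process for which gr_proc_is_restricted() is true can only open seq files in /proc/net whose ops are dev_seq_ops or the registered IPv6 address ops; everything going through single_open_net() is denied outright. The allow-list condition above, restated with the cheap flag test first (equivalent apart from evaluation order):

	if (gr_proc_is_restricted()
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	    && ops != ipv6_seq_ops_addr
#endif
	    && ops != &dev_seq_ops)
		return -EACCES;
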
67597diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
67598index 7129046..6914844 100644
67599--- a/fs/proc/proc_sysctl.c
67600+++ b/fs/proc/proc_sysctl.c
67601@@ -11,13 +11,21 @@
67602 #include <linux/namei.h>
67603 #include <linux/mm.h>
67604 #include <linux/module.h>
67605+#include <linux/nsproxy.h>
67606+#ifdef CONFIG_GRKERNSEC
67607+#include <net/net_namespace.h>
67608+#endif
67609 #include "internal.h"
67610
67611+extern int gr_handle_chroot_sysctl(const int op);
67612+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67613+ const int op);
67614+
67615 static const struct dentry_operations proc_sys_dentry_operations;
67616 static const struct file_operations proc_sys_file_operations;
67617-static const struct inode_operations proc_sys_inode_operations;
67618+const struct inode_operations proc_sys_inode_operations;
67619 static const struct file_operations proc_sys_dir_file_operations;
67620-static const struct inode_operations proc_sys_dir_operations;
67621+const struct inode_operations proc_sys_dir_operations;
67622
67623 void proc_sys_poll_notify(struct ctl_table_poll *poll)
67624 {
67625@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
67626
67627 err = NULL;
67628 d_set_d_op(dentry, &proc_sys_dentry_operations);
67629+
67630+ gr_handle_proc_create(dentry, inode);
67631+
67632 d_add(dentry, inode);
67633
67634 out:
67635@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67636 struct inode *inode = file_inode(filp);
67637 struct ctl_table_header *head = grab_header(inode);
67638 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
67639+ int op = write ? MAY_WRITE : MAY_READ;
67640 ssize_t error;
67641 size_t res;
67642
67643@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67644 * and won't be until we finish.
67645 */
67646 error = -EPERM;
67647- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
67648+ if (sysctl_perm(head, table, op))
67649 goto out;
67650
67651 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
67652@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67653 if (!table->proc_handler)
67654 goto out;
67655
67656+#ifdef CONFIG_GRKERNSEC
67657+ error = -EPERM;
67658+ if (gr_handle_chroot_sysctl(op))
67659+ goto out;
67660+ dget(filp->f_path.dentry);
67661+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
67662+ dput(filp->f_path.dentry);
67663+ goto out;
67664+ }
67665+ dput(filp->f_path.dentry);
67666+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
67667+ goto out;
67668+ if (write) {
67669+ if (current->nsproxy->net_ns != table->extra2) {
67670+ if (!capable(CAP_SYS_ADMIN))
67671+ goto out;
67672+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
67673+ goto out;
67674+ }
67675+#endif
67676+
67677 /* careful: calling conventions are nasty here */
67678 res = count;
67679 error = table->proc_handler(table, write, buf, &res, ppos);
67680@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
67681 return false;
67682 } else {
67683 d_set_d_op(child, &proc_sys_dentry_operations);
67684+
67685+ gr_handle_proc_create(child, inode);
67686+
67687 d_add(child, inode);
67688 }
67689 } else {
67690@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
67691 if ((*pos)++ < ctx->pos)
67692 return true;
67693
67694+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
67695+ return 0;
67696+
67697 if (unlikely(S_ISLNK(table->mode)))
67698 res = proc_sys_link_fill_cache(file, ctx, head, table);
67699 else
67700@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
67701 if (IS_ERR(head))
67702 return PTR_ERR(head);
67703
67704+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
67705+ return -ENOENT;
67706+
67707 generic_fillattr(inode, stat);
67708 if (table)
67709 stat->mode = (stat->mode & S_IFMT) | table->mode;
67710@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
67711 .llseek = generic_file_llseek,
67712 };
67713
67714-static const struct inode_operations proc_sys_inode_operations = {
67715+const struct inode_operations proc_sys_inode_operations = {
67716 .permission = proc_sys_permission,
67717 .setattr = proc_sys_setattr,
67718 .getattr = proc_sys_getattr,
67719 };
67720
67721-static const struct inode_operations proc_sys_dir_operations = {
67722+const struct inode_operations proc_sys_dir_operations = {
67723 .lookup = proc_sys_lookup,
67724 .permission = proc_sys_permission,
67725 .setattr = proc_sys_setattr,
67726@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
67727 static struct ctl_dir *new_dir(struct ctl_table_set *set,
67728 const char *name, int namelen)
67729 {
67730- struct ctl_table *table;
67731+ ctl_table_no_const *table;
67732 struct ctl_dir *new;
67733 struct ctl_node *node;
67734 char *new_name;
67735@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
67736 return NULL;
67737
67738 node = (struct ctl_node *)(new + 1);
67739- table = (struct ctl_table *)(node + 1);
67740+ table = (ctl_table_no_const *)(node + 1);
67741 new_name = (char *)(table + 2);
67742 memcpy(new_name, name, namelen);
67743 new_name[namelen] = '\0';
67744@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
67745 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
67746 struct ctl_table_root *link_root)
67747 {
67748- struct ctl_table *link_table, *entry, *link;
67749+ ctl_table_no_const *link_table, *link;
67750+ struct ctl_table *entry;
67751 struct ctl_table_header *links;
67752 struct ctl_node *node;
67753 char *link_name;
67754@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
67755 return NULL;
67756
67757 node = (struct ctl_node *)(links + 1);
67758- link_table = (struct ctl_table *)(node + nr_entries);
67759+ link_table = (ctl_table_no_const *)(node + nr_entries);
67760 link_name = (char *)&link_table[nr_entries + 1];
67761
67762 for (link = link_table, entry = table; entry->procname; link++, entry++) {
67763@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67764 struct ctl_table_header ***subheader, struct ctl_table_set *set,
67765 struct ctl_table *table)
67766 {
67767- struct ctl_table *ctl_table_arg = NULL;
67768- struct ctl_table *entry, *files;
67769+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
67770+ struct ctl_table *entry;
67771 int nr_files = 0;
67772 int nr_dirs = 0;
67773 int err = -ENOMEM;
67774@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67775 nr_files++;
67776 }
67777
67778- files = table;
67779 /* If there are mixed files and directories we need a new table */
67780 if (nr_dirs && nr_files) {
67781- struct ctl_table *new;
67782+ ctl_table_no_const *new;
67783 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
67784 GFP_KERNEL);
67785 if (!files)
67786@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67787 /* Register everything except a directory full of subdirectories */
67788 if (nr_files || !nr_dirs) {
67789 struct ctl_table_header *header;
67790- header = __register_sysctl_table(set, path, files);
67791+ header = __register_sysctl_table(set, path, files ? files : table);
67792 if (!header) {
67793 kfree(ctl_table_arg);
67794 goto out;
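The proc_sysctl.c hunks above replace struct ctl_table with ctl_table_no_const at every site that builds a table at runtime. The alias is introduced elsewhere in this patch (in the include/linux/sysctl.h changes); a sketch of the idea, with the plugin glue paraphrased and possibly differing in detail:

/* Sketch: the PaX constify gcc plugin treats ops-style structures as
 * read-only after initialization; __no_const exempts a type from that
 * treatment, and without the plugin it expands to nothing, so the
 * alias degrades to a plain struct ctl_table. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif
typedef struct ctl_table __no_const ctl_table_no_const;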
67795diff --git a/fs/proc/root.c b/fs/proc/root.c
67796index 5dbadec..473af2f 100644
67797--- a/fs/proc/root.c
67798+++ b/fs/proc/root.c
67799@@ -185,7 +185,15 @@ void __init proc_root_init(void)
67800 proc_mkdir("openprom", NULL);
67801 #endif
67802 proc_tty_init();
67803+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67804+#ifdef CONFIG_GRKERNSEC_PROC_USER
67805+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
67806+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67807+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
67808+#endif
67809+#else
67810 proc_mkdir("bus", NULL);
67811+#endif
67812 proc_sys_init();
67813 }
67814
67815diff --git a/fs/proc/stat.c b/fs/proc/stat.c
67816index bf2d03f..f058f9c 100644
67817--- a/fs/proc/stat.c
67818+++ b/fs/proc/stat.c
67819@@ -11,6 +11,7 @@
67820 #include <linux/irqnr.h>
67821 #include <linux/cputime.h>
67822 #include <linux/tick.h>
67823+#include <linux/grsecurity.h>
67824
67825 #ifndef arch_irq_stat_cpu
67826 #define arch_irq_stat_cpu(cpu) 0
67827@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
67828 u64 sum_softirq = 0;
67829 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
67830 struct timespec boottime;
67831+ int unrestricted = 1;
67832+
67833+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67834+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67835+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
67836+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67837+ && !in_group_p(grsec_proc_gid)
67838+#endif
67839+ )
67840+ unrestricted = 0;
67841+#endif
67842+#endif
67843
67844 user = nice = system = idle = iowait =
67845 irq = softirq = steal = 0;
67846@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
67847 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67848 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67849 idle += get_idle_time(i);
67850- iowait += get_iowait_time(i);
67851- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67852- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67853- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67854- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67855- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67856- sum += kstat_cpu_irqs_sum(i);
67857- sum += arch_irq_stat_cpu(i);
67858+ if (unrestricted) {
67859+ iowait += get_iowait_time(i);
67860+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67861+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67862+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67863+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67864+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67865+ sum += kstat_cpu_irqs_sum(i);
67866+ sum += arch_irq_stat_cpu(i);
67867+ for (j = 0; j < NR_SOFTIRQS; j++) {
67868+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67869
67870- for (j = 0; j < NR_SOFTIRQS; j++) {
67871- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67872-
67873- per_softirq_sums[j] += softirq_stat;
67874- sum_softirq += softirq_stat;
67875+ per_softirq_sums[j] += softirq_stat;
67876+ sum_softirq += softirq_stat;
67877+ }
67878 }
67879 }
67880- sum += arch_irq_stat();
67881+ if (unrestricted)
67882+ sum += arch_irq_stat();
67883
67884 seq_puts(p, "cpu ");
67885 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67886@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
67887 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67888 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67889 idle = get_idle_time(i);
67890- iowait = get_iowait_time(i);
67891- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67892- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67893- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67894- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67895- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67896+ if (unrestricted) {
67897+ iowait = get_iowait_time(i);
67898+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67899+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67900+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67901+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67902+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67903+ }
67904 seq_printf(p, "cpu%d", i);
67905 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67906 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
67907@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
67908
67909 /* sum again ? it could be updated? */
67910 for_each_irq_nr(j)
67911- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
67912+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
67913
67914 seq_printf(p,
67915 "\nctxt %llu\n"
67916@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
67917 "processes %lu\n"
67918 "procs_running %lu\n"
67919 "procs_blocked %lu\n",
67920- nr_context_switches(),
67921+ unrestricted ? nr_context_switches() : 0ULL,
67922 (unsigned long)jif,
67923- total_forks,
67924- nr_running(),
67925- nr_iowait());
67926+ unrestricted ? total_forks : 0UL,
67927+ unrestricted ? nr_running() : 0UL,
67928+ unrestricted ? nr_iowait() : 0UL);
67929
67930 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
67931
67932diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
67933index cfa63ee..fce112e 100644
67934--- a/fs/proc/task_mmu.c
67935+++ b/fs/proc/task_mmu.c
67936@@ -13,12 +13,19 @@
67937 #include <linux/swap.h>
67938 #include <linux/swapops.h>
67939 #include <linux/mmu_notifier.h>
67940+#include <linux/grsecurity.h>
67941
67942 #include <asm/elf.h>
67943 #include <asm/uaccess.h>
67944 #include <asm/tlbflush.h>
67945 #include "internal.h"
67946
67947+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67948+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67949+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67950+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67951+#endif
67952+
67953 void task_mem(struct seq_file *m, struct mm_struct *mm)
67954 {
67955 unsigned long data, text, lib, swap;
67956@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67957 "VmExe:\t%8lu kB\n"
67958 "VmLib:\t%8lu kB\n"
67959 "VmPTE:\t%8lu kB\n"
67960- "VmSwap:\t%8lu kB\n",
67961- hiwater_vm << (PAGE_SHIFT-10),
67962+ "VmSwap:\t%8lu kB\n"
67963+
67964+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67965+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
67966+#endif
67967+
67968+ ,hiwater_vm << (PAGE_SHIFT-10),
67969 total_vm << (PAGE_SHIFT-10),
67970 mm->locked_vm << (PAGE_SHIFT-10),
67971 mm->pinned_vm << (PAGE_SHIFT-10),
67972@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67973 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
67974 (PTRS_PER_PTE * sizeof(pte_t) *
67975 atomic_long_read(&mm->nr_ptes)) >> 10,
67976- swap << (PAGE_SHIFT-10));
67977+ swap << (PAGE_SHIFT-10)
67978+
67979+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67980+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67981+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
67982+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
67983+#else
67984+ , mm->context.user_cs_base
67985+ , mm->context.user_cs_limit
67986+#endif
67987+#endif
67988+
67989+ );
67990 }
67991
67992 unsigned long task_vsize(struct mm_struct *mm)
67993@@ -271,13 +295,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67994 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
67995 }
67996
67997- /* We don't show the stack guard page in /proc/maps */
67998+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67999+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
68000+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
68001+#else
68002 start = vma->vm_start;
68003- if (stack_guard_page_start(vma, start))
68004- start += PAGE_SIZE;
68005 end = vma->vm_end;
68006- if (stack_guard_page_end(vma, end))
68007- end -= PAGE_SIZE;
68008+#endif
68009
68010 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
68011 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
68012@@ -287,7 +311,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68013 flags & VM_WRITE ? 'w' : '-',
68014 flags & VM_EXEC ? 'x' : '-',
68015 flags & VM_MAYSHARE ? 's' : 'p',
68016+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68017+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
68018+#else
68019 pgoff,
68020+#endif
68021 MAJOR(dev), MINOR(dev), ino);
68022
68023 /*
68024@@ -296,7 +324,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68025 */
68026 if (file) {
68027 seq_pad(m, ' ');
68028- seq_path(m, &file->f_path, "\n");
68029+ seq_path(m, &file->f_path, "\n\\");
68030 goto done;
68031 }
68032
68033@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68034 * Thread stack in /proc/PID/task/TID/maps or
68035 * the main process stack.
68036 */
68037- if (!is_pid || (vma->vm_start <= mm->start_stack &&
68038- vma->vm_end >= mm->start_stack)) {
68039+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
68040+ (vma->vm_start <= mm->start_stack &&
68041+ vma->vm_end >= mm->start_stack)) {
68042 name = "[stack]";
68043 } else {
68044 /* Thread stack in /proc/PID/maps */
68045@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
68046 struct proc_maps_private *priv = m->private;
68047 struct task_struct *task = priv->task;
68048
68049+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68050+ if (current->exec_id != m->exec_id) {
68051+ gr_log_badprocpid("maps");
68052+ return 0;
68053+ }
68054+#endif
68055+
68056 show_map_vma(m, vma, is_pid);
68057
68058 if (m->count < m->size) /* vma is copied successfully */
68059@@ -593,12 +629,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68060 .private = &mss,
68061 };
68062
68063+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68064+ if (current->exec_id != m->exec_id) {
68065+ gr_log_badprocpid("smaps");
68066+ return 0;
68067+ }
68068+#endif
68069 memset(&mss, 0, sizeof mss);
68070- mss.vma = vma;
68071- /* mmap_sem is held in m_start */
68072- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
68073- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
68074-
68075+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68076+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
68077+#endif
68078+ mss.vma = vma;
68079+ /* mmap_sem is held in m_start */
68080+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
68081+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
68082+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68083+ }
68084+#endif
68085 show_map_vma(m, vma, is_pid);
68086
68087 seq_printf(m,
68088@@ -616,7 +663,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68089 "KernelPageSize: %8lu kB\n"
68090 "MMUPageSize: %8lu kB\n"
68091 "Locked: %8lu kB\n",
68092+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68093+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
68094+#else
68095 (vma->vm_end - vma->vm_start) >> 10,
68096+#endif
68097 mss.resident >> 10,
68098 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
68099 mss.shared_clean >> 10,
68100@@ -1398,6 +1449,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68101 char buffer[64];
68102 int nid;
68103
68104+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68105+ if (current->exec_id != m->exec_id) {
68106+ gr_log_badprocpid("numa_maps");
68107+ return 0;
68108+ }
68109+#endif
68110+
68111 if (!mm)
68112 return 0;
68113
68114@@ -1415,11 +1473,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68115 mpol_to_str(buffer, sizeof(buffer), pol);
68116 mpol_cond_put(pol);
68117
68118+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68119+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
68120+#else
68121 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
68122+#endif
68123
68124 if (file) {
68125 seq_puts(m, " file=");
68126- seq_path(m, &file->f_path, "\n\t= ");
68127+ seq_path(m, &file->f_path, "\n\t\\= ");
68128 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68129 seq_puts(m, " heap");
68130 } else {
68131diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
68132index 678455d..ebd3245 100644
68133--- a/fs/proc/task_nommu.c
68134+++ b/fs/proc/task_nommu.c
68135@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68136 else
68137 bytes += kobjsize(mm);
68138
68139- if (current->fs && current->fs->users > 1)
68140+ if (current->fs && atomic_read(&current->fs->users) > 1)
68141 sbytes += kobjsize(current->fs);
68142 else
68143 bytes += kobjsize(current->fs);
68144@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
68145
68146 if (file) {
68147 seq_pad(m, ' ');
68148- seq_path(m, &file->f_path, "");
68149+ seq_path(m, &file->f_path, "\n\\");
68150 } else if (mm) {
68151 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
68152
68153diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
68154index 382aa89..6b03974 100644
68155--- a/fs/proc/vmcore.c
68156+++ b/fs/proc/vmcore.c
68157@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
68158 nr_bytes = count;
68159
68160 /* If pfn is not ram, return zeros for sparse dump files */
68161- if (pfn_is_ram(pfn) == 0)
68162- memset(buf, 0, nr_bytes);
68163- else {
68164+ if (pfn_is_ram(pfn) == 0) {
68165+ if (userbuf) {
68166+ if (clear_user((char __force_user *)buf, nr_bytes))
68167+ return -EFAULT;
68168+ } else
68169+ memset(buf, 0, nr_bytes);
68170+ } else {
68171 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
68172 offset, userbuf);
68173 if (tmp < 0)
68174@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
68175 static int copy_to(void *target, void *src, size_t size, int userbuf)
68176 {
68177 if (userbuf) {
68178- if (copy_to_user((char __user *) target, src, size))
68179+ if (copy_to_user((char __force_user *) target, src, size))
68180 return -EFAULT;
68181 } else {
68182 memcpy(target, src, size);
68183@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68184 if (*fpos < m->offset + m->size) {
68185 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
68186 start = m->paddr + *fpos - m->offset;
68187- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
68188+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
68189 if (tmp < 0)
68190 return tmp;
68191 buflen -= tsz;
68192@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68193 static ssize_t read_vmcore(struct file *file, char __user *buffer,
68194 size_t buflen, loff_t *fpos)
68195 {
68196- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
68197+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
68198 }
68199
68200 /*
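Two details in the fs/proc/vmcore.c hunks: read_from_oldmem() now zeroes user destinations with clear_user(), since memset() on a userspace pointer would fault under PaX's UDEREF-style kernel/user separation, and the casts become __force_user / __force_kernel, explicit sparse address-space annotations that this patch introduces. Their definitions, paraphrased from the patch's compiler.h arrangement (details may differ):

/* Paraphrased sketch: under sparse the annotations keep the
 * address-space change visible to the checker; in a normal build
 * they compile away entirely. */
#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif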
68201diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
68202index b00fcc9..e0c6381 100644
68203--- a/fs/qnx6/qnx6.h
68204+++ b/fs/qnx6/qnx6.h
68205@@ -74,7 +74,7 @@ enum {
68206 BYTESEX_BE,
68207 };
68208
68209-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68210+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68211 {
68212 if (sbi->s_bytesex == BYTESEX_LE)
68213 return le64_to_cpu((__force __le64)n);
68214@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
68215 return (__force __fs64)cpu_to_be64(n);
68216 }
68217
68218-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68219+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68220 {
68221 if (sbi->s_bytesex == BYTESEX_LE)
68222 return le32_to_cpu((__force __le32)n);
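The qnx6 byte-swap helpers above gain __intentional_overflow(-1), an annotation consumed by the size_overflow gcc plugin: the -1 argument marks the function's return value as intentionally allowed to wrap, exempting these endianness wrappers from the plugin's overflow instrumentation. A sketch of the definition, assuming it mirrors the patch's other plugin hooks:

/* Sketch only: with the plugin absent the annotation compiles away. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif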
68223diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
68224index 72d2917..c917c12 100644
68225--- a/fs/quota/netlink.c
68226+++ b/fs/quota/netlink.c
68227@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
68228 void quota_send_warning(struct kqid qid, dev_t dev,
68229 const char warntype)
68230 {
68231- static atomic_t seq;
68232+ static atomic_unchecked_t seq;
68233 struct sk_buff *skb;
68234 void *msg_head;
68235 int ret;
68236@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
68237 "VFS: Not enough memory to send quota warning.\n");
68238 return;
68239 }
68240- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
68241+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
68242 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
68243 if (!msg_head) {
68244 printk(KERN_ERR
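The quota netlink sequence number is a counter that may legitimately wrap, so it moves to atomic_unchecked_t. Under PAX_REFCOUNT, plain atomic_t arithmetic traps on signed overflow to stop reference-count overflow exploits; the _unchecked variants opt a counter out of that policy. When PAX_REFCOUNT is off the opt-out costs nothing; paraphrasing the fallback definitions from the full patch:

/* Paraphrased fallback (no CONFIG_PAX_REFCOUNT): the unchecked type
 * is just atomic_t and the ops map straight through. */
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))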
68245diff --git a/fs/read_write.c b/fs/read_write.c
68246index 009d854..16ce214 100644
68247--- a/fs/read_write.c
68248+++ b/fs/read_write.c
68249@@ -495,7 +495,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
68250
68251 old_fs = get_fs();
68252 set_fs(get_ds());
68253- p = (__force const char __user *)buf;
68254+ p = (const char __force_user *)buf;
68255 if (count > MAX_RW_COUNT)
68256 count = MAX_RW_COUNT;
68257 if (file->f_op->write)
68258diff --git a/fs/readdir.c b/fs/readdir.c
68259index 33fd922..e0d6094 100644
68260--- a/fs/readdir.c
68261+++ b/fs/readdir.c
68262@@ -18,6 +18,7 @@
68263 #include <linux/security.h>
68264 #include <linux/syscalls.h>
68265 #include <linux/unistd.h>
68266+#include <linux/namei.h>
68267
68268 #include <asm/uaccess.h>
68269
68270@@ -71,6 +72,7 @@ struct old_linux_dirent {
68271 struct readdir_callback {
68272 struct dir_context ctx;
68273 struct old_linux_dirent __user * dirent;
68274+ struct file * file;
68275 int result;
68276 };
68277
68278@@ -88,6 +90,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
68279 buf->result = -EOVERFLOW;
68280 return -EOVERFLOW;
68281 }
68282+
68283+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68284+ return 0;
68285+
68286 buf->result++;
68287 dirent = buf->dirent;
68288 if (!access_ok(VERIFY_WRITE, dirent,
68289@@ -119,6 +125,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68290 if (!f.file)
68291 return -EBADF;
68292
68293+ buf.file = f.file;
68294 error = iterate_dir(f.file, &buf.ctx);
68295 if (buf.result)
68296 error = buf.result;
68297@@ -144,6 +151,7 @@ struct getdents_callback {
68298 struct dir_context ctx;
68299 struct linux_dirent __user * current_dir;
68300 struct linux_dirent __user * previous;
68301+ struct file * file;
68302 int count;
68303 int error;
68304 };
68305@@ -165,6 +173,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
68306 buf->error = -EOVERFLOW;
68307 return -EOVERFLOW;
68308 }
68309+
68310+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68311+ return 0;
68312+
68313 dirent = buf->previous;
68314 if (dirent) {
68315 if (__put_user(offset, &dirent->d_off))
68316@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
68317 if (!f.file)
68318 return -EBADF;
68319
68320+ buf.file = f.file;
68321 error = iterate_dir(f.file, &buf.ctx);
68322 if (error >= 0)
68323 error = buf.error;
68324@@ -228,6 +241,7 @@ struct getdents_callback64 {
68325 struct dir_context ctx;
68326 struct linux_dirent64 __user * current_dir;
68327 struct linux_dirent64 __user * previous;
68328+ struct file *file;
68329 int count;
68330 int error;
68331 };
68332@@ -243,6 +257,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
68333 buf->error = -EINVAL; /* only used if we fail.. */
68334 if (reclen > buf->count)
68335 return -EINVAL;
68336+
68337+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68338+ return 0;
68339+
68340 dirent = buf->previous;
68341 if (dirent) {
68342 if (__put_user(offset, &dirent->d_off))
68343@@ -290,6 +308,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68344 if (!f.file)
68345 return -EBADF;
68346
68347+ buf.file = f.file;
68348 error = iterate_dir(f.file, &buf.ctx);
68349 if (error >= 0)
68350 error = buf.error;
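All three getdents paths above receive the same two-part treatment: the syscall stashes the struct file * in its callback state, and the fill callback consults the RBAC hook before emitting an entry. A zero return from a filldir callback means the entry was consumed and iteration continues, so denied entries are silently hidden from listings instead of aborting the whole getdents() call with an error. Distilled:

/* Distilled from the hunks above: returning 0 skips this dirent but
 * keeps iterating; a negative return would abort the whole listing. */
if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
	return 0;	/* hide the entry, continue */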
68351diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
68352index 54fdf19..987862b 100644
68353--- a/fs/reiserfs/do_balan.c
68354+++ b/fs/reiserfs/do_balan.c
68355@@ -1872,7 +1872,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
68356 return;
68357 }
68358
68359- atomic_inc(&fs_generation(tb->tb_sb));
68360+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
68361 do_balance_starts(tb);
68362
68363 /*
68364diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
68365index cfaee91..b9d0d60 100644
68366--- a/fs/reiserfs/item_ops.c
68367+++ b/fs/reiserfs/item_ops.c
68368@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
68369 }
68370
68371 static struct item_operations errcatch_ops = {
68372- errcatch_bytes_number,
68373- errcatch_decrement_key,
68374- errcatch_is_left_mergeable,
68375- errcatch_print_item,
68376- errcatch_check_item,
68377+ .bytes_number = errcatch_bytes_number,
68378+ .decrement_key = errcatch_decrement_key,
68379+ .is_left_mergeable = errcatch_is_left_mergeable,
68380+ .print_item = errcatch_print_item,
68381+ .check_item = errcatch_check_item,
68382
68383- errcatch_create_vi,
68384- errcatch_check_left,
68385- errcatch_check_right,
68386- errcatch_part_size,
68387- errcatch_unit_num,
68388- errcatch_print_vi
68389+ .create_vi = errcatch_create_vi,
68390+ .check_left = errcatch_check_left,
68391+ .check_right = errcatch_check_right,
68392+ .part_size = errcatch_part_size,
68393+ .unit_num = errcatch_unit_num,
68394+ .print_vi = errcatch_print_vi
68395 };
68396
68397 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
68398diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
68399index 02b0b7d..c85018b 100644
68400--- a/fs/reiserfs/procfs.c
68401+++ b/fs/reiserfs/procfs.c
68402@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
68403 "SMALL_TAILS " : "NO_TAILS ",
68404 replay_only(sb) ? "REPLAY_ONLY " : "",
68405 convert_reiserfs(sb) ? "CONV " : "",
68406- atomic_read(&r->s_generation_counter),
68407+ atomic_read_unchecked(&r->s_generation_counter),
68408 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
68409 SF(s_do_balance), SF(s_unneeded_left_neighbor),
68410 SF(s_good_search_by_key_reada), SF(s_bmaps),
68411diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
68412index bf53888..227f5ae 100644
68413--- a/fs/reiserfs/reiserfs.h
68414+++ b/fs/reiserfs/reiserfs.h
68415@@ -573,7 +573,7 @@ struct reiserfs_sb_info {
68416 /* Comment? -Hans */
68417 wait_queue_head_t s_wait;
68418 /* increased by one every time the tree gets re-balanced */
68419- atomic_t s_generation_counter;
68420+ atomic_unchecked_t s_generation_counter;
68421
68422 /* File system properties. Currently holds on-disk FS format */
68423 unsigned long s_properties;
68424@@ -2294,7 +2294,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68425 #define REISERFS_USER_MEM 1 /* user memory mode */
68426
68427 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68428-#define get_generation(s) atomic_read (&fs_generation(s))
68429+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68430 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68431 #define __fs_changed(gen,s) (gen != get_generation (s))
68432 #define fs_changed(gen,s) \
68433diff --git a/fs/select.c b/fs/select.c
68434index 467bb1c..cf9d65a 100644
68435--- a/fs/select.c
68436+++ b/fs/select.c
68437@@ -20,6 +20,7 @@
68438 #include <linux/export.h>
68439 #include <linux/slab.h>
68440 #include <linux/poll.h>
68441+#include <linux/security.h>
68442 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
68443 #include <linux/file.h>
68444 #include <linux/fdtable.h>
68445@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
68446 struct poll_list *walk = head;
68447 unsigned long todo = nfds;
68448
68449+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
68450 if (nfds > rlimit(RLIMIT_NOFILE))
68451 return -EINVAL;
68452
68453diff --git a/fs/seq_file.c b/fs/seq_file.c
68454index 3857b72..0b7281e 100644
68455--- a/fs/seq_file.c
68456+++ b/fs/seq_file.c
68457@@ -12,6 +12,8 @@
68458 #include <linux/slab.h>
68459 #include <linux/cred.h>
68460 #include <linux/mm.h>
68461+#include <linux/sched.h>
68462+#include <linux/grsecurity.h>
68463
68464 #include <asm/uaccess.h>
68465 #include <asm/page.h>
68466@@ -34,12 +36,7 @@ static void seq_set_overflow(struct seq_file *m)
68467
68468 static void *seq_buf_alloc(unsigned long size)
68469 {
68470- void *buf;
68471-
68472- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
68473- if (!buf && size > PAGE_SIZE)
68474- buf = vmalloc(size);
68475- return buf;
68476+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
68477 }
68478
68479 /**
68480@@ -72,6 +69,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
68481 #ifdef CONFIG_USER_NS
68482 p->user_ns = file->f_cred->user_ns;
68483 #endif
68484+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68485+ p->exec_id = current->exec_id;
68486+#endif
68487
68488 /*
68489 * Wrappers around seq_open(e.g. swaps_open) need to be
68490@@ -94,6 +94,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
68491 }
68492 EXPORT_SYMBOL(seq_open);
68493
68494+
68495+int seq_open_restrict(struct file *file, const struct seq_operations *op)
68496+{
68497+ if (gr_proc_is_restricted())
68498+ return -EACCES;
68499+
68500+ return seq_open(file, op);
68501+}
68502+EXPORT_SYMBOL(seq_open_restrict);
68503+
68504 static int traverse(struct seq_file *m, loff_t offset)
68505 {
68506 loff_t pos = 0, index;
68507@@ -165,7 +175,7 @@ Eoverflow:
68508 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
68509 {
68510 struct seq_file *m = file->private_data;
68511- size_t copied = 0;
68512+ ssize_t copied = 0;
68513 loff_t pos;
68514 size_t n;
68515 void *p;
68516@@ -596,7 +606,7 @@ static void single_stop(struct seq_file *p, void *v)
68517 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
68518 void *data)
68519 {
68520- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
68521+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
68522 int res = -ENOMEM;
68523
68524 if (op) {
68525@@ -632,6 +642,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
68526 }
68527 EXPORT_SYMBOL(single_open_size);
68528
68529+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
68530+ void *data)
68531+{
68532+ if (gr_proc_is_restricted())
68533+ return -EACCES;
68534+
68535+ return single_open(file, show, data);
68536+}
68537+EXPORT_SYMBOL(single_open_restrict);
68538+
68539+
68540 int single_release(struct inode *inode, struct file *file)
68541 {
68542 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
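The seq_file.c changes do two things: seq_buf_alloc() now draws buffers from kmalloc with GFP_USERCOPY (dropping the vmalloc fallback), which under PAX_USERCOPY_SLABS places them in caches whitelisted for copies to userspace, and seq_open_restrict()/single_open_restrict() give proc code an opt-in front door for the GRKERNSEC_PROC policy. A hedged usage sketch for a hypothetical entry (foo_proc_open and foo_proc_show are illustrative names, not from this patch):

/* Hypothetical proc entry honouring proc restrictions: the open fails
 * with -EACCES for restricted viewers before any state is set up. */
static int foo_proc_open(struct inode *inode, struct file *file)
{
	return single_open_restrict(file, foo_proc_show, NULL);
}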
68543diff --git a/fs/splice.c b/fs/splice.c
68544index f5cb9ba..8ddb1e9 100644
68545--- a/fs/splice.c
68546+++ b/fs/splice.c
68547@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68548 pipe_lock(pipe);
68549
68550 for (;;) {
68551- if (!pipe->readers) {
68552+ if (!atomic_read(&pipe->readers)) {
68553 send_sig(SIGPIPE, current, 0);
68554 if (!ret)
68555 ret = -EPIPE;
68556@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68557 page_nr++;
68558 ret += buf->len;
68559
68560- if (pipe->files)
68561+ if (atomic_read(&pipe->files))
68562 do_wakeup = 1;
68563
68564 if (!--spd->nr_pages)
68565@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68566 do_wakeup = 0;
68567 }
68568
68569- pipe->waiting_writers++;
68570+ atomic_inc(&pipe->waiting_writers);
68571 pipe_wait(pipe);
68572- pipe->waiting_writers--;
68573+ atomic_dec(&pipe->waiting_writers);
68574 }
68575
68576 pipe_unlock(pipe);
68577@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
68578 old_fs = get_fs();
68579 set_fs(get_ds());
68580 /* The cast to a user pointer is valid due to the set_fs() */
68581- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
68582+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
68583 set_fs(old_fs);
68584
68585 return res;
68586@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
68587 old_fs = get_fs();
68588 set_fs(get_ds());
68589 /* The cast to a user pointer is valid due to the set_fs() */
68590- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
68591+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
68592 set_fs(old_fs);
68593
68594 return res;
68595@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
68596 goto err;
68597
68598 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
68599- vec[i].iov_base = (void __user *) page_address(page);
68600+ vec[i].iov_base = (void __force_user *) page_address(page);
68601 vec[i].iov_len = this_len;
68602 spd.pages[i] = page;
68603 spd.nr_pages++;
68604@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68605 ops->release(pipe, buf);
68606 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68607 pipe->nrbufs--;
68608- if (pipe->files)
68609+ if (atomic_read(&pipe->files))
68610 sd->need_wakeup = true;
68611 }
68612
68613@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68614 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
68615 {
68616 while (!pipe->nrbufs) {
68617- if (!pipe->writers)
68618+ if (!atomic_read(&pipe->writers))
68619 return 0;
68620
68621- if (!pipe->waiting_writers && sd->num_spliced)
68622+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
68623 return 0;
68624
68625 if (sd->flags & SPLICE_F_NONBLOCK)
68626@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
68627 ops->release(pipe, buf);
68628 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68629 pipe->nrbufs--;
68630- if (pipe->files)
68631+ if (atomic_read(&pipe->files))
68632 sd.need_wakeup = true;
68633 } else {
68634 buf->offset += ret;
68635@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
68636 * out of the pipe right after the splice_to_pipe(). So set
68637 * PIPE_READERS appropriately.
68638 */
68639- pipe->readers = 1;
68640+ atomic_set(&pipe->readers, 1);
68641
68642 current->splice_pipe = pipe;
68643 }
68644@@ -1496,6 +1496,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
68645
68646 partial[buffers].offset = off;
68647 partial[buffers].len = plen;
68648+ partial[buffers].private = 0;
68649
68650 off = 0;
68651 len -= plen;
68652@@ -1732,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68653 ret = -ERESTARTSYS;
68654 break;
68655 }
68656- if (!pipe->writers)
68657+ if (!atomic_read(&pipe->writers))
68658 break;
68659- if (!pipe->waiting_writers) {
68660+ if (!atomic_read(&pipe->waiting_writers)) {
68661 if (flags & SPLICE_F_NONBLOCK) {
68662 ret = -EAGAIN;
68663 break;
68664@@ -1766,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68665 pipe_lock(pipe);
68666
68667 while (pipe->nrbufs >= pipe->buffers) {
68668- if (!pipe->readers) {
68669+ if (!atomic_read(&pipe->readers)) {
68670 send_sig(SIGPIPE, current, 0);
68671 ret = -EPIPE;
68672 break;
68673@@ -1779,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68674 ret = -ERESTARTSYS;
68675 break;
68676 }
68677- pipe->waiting_writers++;
68678+ atomic_inc(&pipe->waiting_writers);
68679 pipe_wait(pipe);
68680- pipe->waiting_writers--;
68681+ atomic_dec(&pipe->waiting_writers);
68682 }
68683
68684 pipe_unlock(pipe);
68685@@ -1817,14 +1818,14 @@ retry:
68686 pipe_double_lock(ipipe, opipe);
68687
68688 do {
68689- if (!opipe->readers) {
68690+ if (!atomic_read(&opipe->readers)) {
68691 send_sig(SIGPIPE, current, 0);
68692 if (!ret)
68693 ret = -EPIPE;
68694 break;
68695 }
68696
68697- if (!ipipe->nrbufs && !ipipe->writers)
68698+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
68699 break;
68700
68701 /*
68702@@ -1921,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68703 pipe_double_lock(ipipe, opipe);
68704
68705 do {
68706- if (!opipe->readers) {
68707+ if (!atomic_read(&opipe->readers)) {
68708 send_sig(SIGPIPE, current, 0);
68709 if (!ret)
68710 ret = -EPIPE;
68711@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68712 * return EAGAIN if we have the potential of some data in the
68713 * future, otherwise just return 0
68714 */
68715- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
68716+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
68717 ret = -EAGAIN;
68718
68719 pipe_unlock(ipipe);
68720diff --git a/fs/stat.c b/fs/stat.c
68721index ae0c3ce..9ee641c 100644
68722--- a/fs/stat.c
68723+++ b/fs/stat.c
68724@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
68725 stat->gid = inode->i_gid;
68726 stat->rdev = inode->i_rdev;
68727 stat->size = i_size_read(inode);
68728- stat->atime = inode->i_atime;
68729- stat->mtime = inode->i_mtime;
68730+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68731+ stat->atime = inode->i_ctime;
68732+ stat->mtime = inode->i_ctime;
68733+ } else {
68734+ stat->atime = inode->i_atime;
68735+ stat->mtime = inode->i_mtime;
68736+ }
68737 stat->ctime = inode->i_ctime;
68738 stat->blksize = (1 << inode->i_blkbits);
68739 stat->blocks = inode->i_blocks;
68740@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
68741 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
68742 {
68743 struct inode *inode = path->dentry->d_inode;
68744+ int retval;
68745
68746- if (inode->i_op->getattr)
68747- return inode->i_op->getattr(path->mnt, path->dentry, stat);
68748+ if (inode->i_op->getattr) {
68749+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
68750+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68751+ stat->atime = stat->ctime;
68752+ stat->mtime = stat->ctime;
68753+ }
68754+ return retval;
68755+ }
68756
68757 generic_fillattr(inode, stat);
68758 return 0;
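The fs/stat.c hunks substitute ctime for atime and mtime on side-channel devices (terminals and the like, per is_sidechannel_device() defined elsewhere in this patch) unless the caller holds CAP_MKNOD, defeating stat()-based snooping on when another user's tty was last active. The effect is visible from userspace; a small test, assuming the feature is enabled and /dev/ptmx is classified as such a device:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/dev/ptmx", &st) != 0) {
		perror("stat");
		return 1;
	}
	/* For an unprivileged caller both comparisons should print 1,
	 * i.e. the reported times mirror ctime. */
	printf("atime==ctime: %d, mtime==ctime: %d\n",
	       st.st_atime == st.st_ctime, st.st_mtime == st.st_ctime);
	return 0;
}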
68759diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
68760index 0b45ff4..847de5b 100644
68761--- a/fs/sysfs/dir.c
68762+++ b/fs/sysfs/dir.c
68763@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
68764 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68765 {
68766 struct kernfs_node *parent, *kn;
68767+ const char *name;
68768+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
68769+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68770+ const char *parent_name;
68771+#endif
68772
68773 BUG_ON(!kobj);
68774
68775+ name = kobject_name(kobj);
68776+
68777 if (kobj->parent)
68778 parent = kobj->parent->sd;
68779 else
68780@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68781 if (!parent)
68782 return -ENOENT;
68783
68784- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
68785- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
68786+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68787+ parent_name = parent->name;
68788+ mode = S_IRWXU;
68789+
68790+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
68791+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
68792+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
68793+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
68794+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
68795+#endif
68796+
68797+ kn = kernfs_create_dir_ns(parent, name,
68798+ mode, kobj, ns);
68799 if (IS_ERR(kn)) {
68800 if (PTR_ERR(kn) == -EEXIST)
68801- sysfs_warn_dup(parent, kobject_name(kobj));
68802+ sysfs_warn_dup(parent, name);
68803 return PTR_ERR(kn);
68804 }
68805
68806diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
68807index 69d4889..a810bd4 100644
68808--- a/fs/sysv/sysv.h
68809+++ b/fs/sysv/sysv.h
68810@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
68811 #endif
68812 }
68813
68814-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68815+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68816 {
68817 if (sbi->s_bytesex == BYTESEX_PDP)
68818 return PDP_swab((__force __u32)n);
68819diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
68820index 2290d58..7791371 100644
68821--- a/fs/ubifs/io.c
68822+++ b/fs/ubifs/io.c
68823@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
68824 return err;
68825 }
68826
68827-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68828+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68829 {
68830 int err;
68831
68832diff --git a/fs/udf/misc.c b/fs/udf/misc.c
68833index c175b4d..8f36a16 100644
68834--- a/fs/udf/misc.c
68835+++ b/fs/udf/misc.c
68836@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
68837
68838 u8 udf_tag_checksum(const struct tag *t)
68839 {
68840- u8 *data = (u8 *)t;
68841+ const u8 *data = (const u8 *)t;
68842 u8 checksum = 0;
68843 int i;
68844 for (i = 0; i < sizeof(struct tag); ++i)
68845diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
68846index 8d974c4..b82f6ec 100644
68847--- a/fs/ufs/swab.h
68848+++ b/fs/ufs/swab.h
68849@@ -22,7 +22,7 @@ enum {
68850 BYTESEX_BE
68851 };
68852
68853-static inline u64
68854+static inline u64 __intentional_overflow(-1)
68855 fs64_to_cpu(struct super_block *sbp, __fs64 n)
68856 {
68857 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68858@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
68859 return (__force __fs64)cpu_to_be64(n);
68860 }
68861
68862-static inline u32
68863+static inline u32 __intentional_overflow(-1)
68864 fs32_to_cpu(struct super_block *sbp, __fs32 n)
68865 {
68866 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68867diff --git a/fs/utimes.c b/fs/utimes.c
68868index aa138d6..5f3a811 100644
68869--- a/fs/utimes.c
68870+++ b/fs/utimes.c
68871@@ -1,6 +1,7 @@
68872 #include <linux/compiler.h>
68873 #include <linux/file.h>
68874 #include <linux/fs.h>
68875+#include <linux/security.h>
68876 #include <linux/linkage.h>
68877 #include <linux/mount.h>
68878 #include <linux/namei.h>
68879@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
68880 }
68881 }
68882 retry_deleg:
68883+
68884+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
68885+ error = -EACCES;
68886+ goto mnt_drop_write_and_out;
68887+ }
68888+
68889 mutex_lock(&inode->i_mutex);
68890 error = notify_change(path->dentry, &newattrs, &delegated_inode);
68891 mutex_unlock(&inode->i_mutex);
68892diff --git a/fs/xattr.c b/fs/xattr.c
68893index c69e6d4..cc56af5 100644
68894--- a/fs/xattr.c
68895+++ b/fs/xattr.c
68896@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
68897 return rc;
68898 }
68899
68900+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
68901+ssize_t
68902+pax_getxattr(struct dentry *dentry, void *value, size_t size)
68903+{
68904+ struct inode *inode = dentry->d_inode;
68905+ ssize_t error;
68906+
68907+ error = inode_permission(inode, MAY_EXEC);
68908+ if (error)
68909+ return error;
68910+
68911+ if (inode->i_op->getxattr)
68912+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
68913+ else
68914+ error = -EOPNOTSUPP;
68915+
68916+ return error;
68917+}
68918+EXPORT_SYMBOL(pax_getxattr);
68919+#endif
68920+
68921 ssize_t
68922 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
68923 {
68924@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
68925 * Extended attribute SET operations
68926 */
68927 static long
68928-setxattr(struct dentry *d, const char __user *name, const void __user *value,
68929+setxattr(struct path *path, const char __user *name, const void __user *value,
68930 size_t size, int flags)
68931 {
68932 int error;
68933@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
68934 posix_acl_fix_xattr_from_user(kvalue, size);
68935 }
68936
68937- error = vfs_setxattr(d, kname, kvalue, size, flags);
68938+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
68939+ error = -EACCES;
68940+ goto out;
68941+ }
68942+
68943+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
68944 out:
68945 if (vvalue)
68946 vfree(vvalue);
68947@@ -377,7 +403,7 @@ retry:
68948 return error;
68949 error = mnt_want_write(path.mnt);
68950 if (!error) {
68951- error = setxattr(path.dentry, name, value, size, flags);
68952+ error = setxattr(&path, name, value, size, flags);
68953 mnt_drop_write(path.mnt);
68954 }
68955 path_put(&path);
68956@@ -401,7 +427,7 @@ retry:
68957 return error;
68958 error = mnt_want_write(path.mnt);
68959 if (!error) {
68960- error = setxattr(path.dentry, name, value, size, flags);
68961+ error = setxattr(&path, name, value, size, flags);
68962 mnt_drop_write(path.mnt);
68963 }
68964 path_put(&path);
68965@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
68966 const void __user *,value, size_t, size, int, flags)
68967 {
68968 struct fd f = fdget(fd);
68969- struct dentry *dentry;
68970 int error = -EBADF;
68971
68972 if (!f.file)
68973 return error;
68974- dentry = f.file->f_path.dentry;
68975- audit_inode(NULL, dentry, 0);
68976+ audit_inode(NULL, f.file->f_path.dentry, 0);
68977 error = mnt_want_write_file(f.file);
68978 if (!error) {
68979- error = setxattr(dentry, name, value, size, flags);
68980+ error = setxattr(&f.file->f_path, name, value, size, flags);
68981 mnt_drop_write_file(f.file);
68982 }
68983 fdput(f);
68984@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
68985 * Extended attribute REMOVE operations
68986 */
68987 static long
68988-removexattr(struct dentry *d, const char __user *name)
68989+removexattr(struct path *path, const char __user *name)
68990 {
68991 int error;
68992 char kname[XATTR_NAME_MAX + 1];
68993@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
68994 if (error < 0)
68995 return error;
68996
68997- return vfs_removexattr(d, kname);
68998+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
68999+ return -EACCES;
69000+
69001+ return vfs_removexattr(path->dentry, kname);
69002 }
69003
69004 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
69005@@ -652,7 +679,7 @@ retry:
69006 return error;
69007 error = mnt_want_write(path.mnt);
69008 if (!error) {
69009- error = removexattr(path.dentry, name);
69010+ error = removexattr(&path, name);
69011 mnt_drop_write(path.mnt);
69012 }
69013 path_put(&path);
69014@@ -675,7 +702,7 @@ retry:
69015 return error;
69016 error = mnt_want_write(path.mnt);
69017 if (!error) {
69018- error = removexattr(path.dentry, name);
69019+ error = removexattr(&path, name);
69020 mnt_drop_write(path.mnt);
69021 }
69022 path_put(&path);
69023@@ -689,16 +716,16 @@ retry:
69024 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
69025 {
69026 struct fd f = fdget(fd);
69027- struct dentry *dentry;
69028+ struct path *path;
69029 int error = -EBADF;
69030
69031 if (!f.file)
69032 return error;
69033- dentry = f.file->f_path.dentry;
69034- audit_inode(NULL, dentry, 0);
69035+ path = &f.file->f_path;
69036+ audit_inode(NULL, path->dentry, 0);
69037 error = mnt_want_write_file(f.file);
69038 if (!error) {
69039- error = removexattr(dentry, name);
69040+ error = removexattr(path, name);
69041 mnt_drop_write_file(f.file);
69042 }
69043 fdput(f);
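Two threads run through the fs/xattr.c hunks: setxattr() and removexattr() switch from taking a bare dentry to a struct path, because the gr_acl hooks decide on (dentry, vfsmount) pairs, and pax_getxattr() is exported for reading the PaX flags attribute. A hedged userspace sketch of inspecting that attribute; the "user.pax.flags" name is the conventional expansion of XATTR_NAME_PAX_FLAGS and may differ from what this patch defines:

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	char flags[32];
	ssize_t len;

	if (argc < 2)
		return 1;
	len = getxattr(argv[1], "user.pax.flags", flags, sizeof(flags) - 1);
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	flags[len] = '\0';
	printf("PaX flags on %s: %s\n", argv[1], flags);
	return 0;
}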
69044diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
69045index 75c3fe5..b0f6bbe 100644
69046--- a/fs/xfs/xfs_bmap.c
69047+++ b/fs/xfs/xfs_bmap.c
69048@@ -583,7 +583,7 @@ xfs_bmap_validate_ret(
69049
69050 #else
69051 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
69052-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
69053+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
69054 #endif /* DEBUG */
69055
69056 /*
69057diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
69058index 48e99af..54ebae3 100644
69059--- a/fs/xfs/xfs_dir2_readdir.c
69060+++ b/fs/xfs/xfs_dir2_readdir.c
69061@@ -159,7 +159,12 @@ xfs_dir2_sf_getdents(
69062 ino = dp->d_ops->sf_get_ino(sfp, sfep);
69063 filetype = dp->d_ops->sf_get_ftype(sfep);
69064 ctx->pos = off & 0x7fffffff;
69065- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69066+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
69067+ char name[sfep->namelen];
69068+ memcpy(name, sfep->name, sfep->namelen);
69069+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
69070+ return 0;
69071+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69072 xfs_dir3_get_dtype(dp->i_mount, filetype)))
69073 return 0;
69074 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
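The xfs_dir2_sf_getdents() change bounces short-form names through a stack buffer before dir_emit() whenever the name still lives in the inode's inline fork (if_data == if_inline_data). Under PAX_USERCOPY, copying to userspace straight out of the xfs inode slab object would be rejected, since that cache is not whitelisted for user copies; a correctly sized local sidesteps the check while the non-inline case is left alone. The generic shape, as a hypothetical helper:

/* Hypothetical helper, not from the patch: size a local exactly to
 * the entry, copy, and emit the local so the user copy never reads
 * from the inline inode fork. */
static bool emit_bounced(struct dir_context *ctx, const unsigned char *src,
			 int namelen, u64 ino, unsigned type)
{
	char name[namelen];	/* VLA, as in the hunk above */

	memcpy(name, src, namelen);
	return dir_emit(ctx, name, namelen, ino, type);
}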
69075diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
69076index 8bc1bbc..0d6911b 100644
69077--- a/fs/xfs/xfs_ioctl.c
69078+++ b/fs/xfs/xfs_ioctl.c
69079@@ -122,7 +122,7 @@ xfs_find_handle(
69080 }
69081
69082 error = -EFAULT;
69083- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
69084+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
69085 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
69086 goto out_put;
69087
69088diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
69089new file mode 100644
69090index 0000000..27cec32
69091--- /dev/null
69092+++ b/grsecurity/Kconfig
69093@@ -0,0 +1,1166 @@
69094+#
69095+# grsecurity configuration
69096+#
69097+menu "Memory Protections"
69098+depends on GRKERNSEC
69099+
69100+config GRKERNSEC_KMEM
69101+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
69102+ default y if GRKERNSEC_CONFIG_AUTO
69103+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
69104+ help
69105+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to be
69106+ written to or read from, preventing their use to modify or leak the
69107+ contents of the running kernel. /dev/port won't be allowed to be opened,
69108+ writing to /dev/cpu/*/msr will be prevented, and kexec support will be removed.
69109+ If you have module support disabled, enabling this will close up several
69110+ ways that are currently used to insert malicious code into the running
69111+ kernel.
69112+
69113+ Even with this feature enabled, we still highly recommend that
69114+ you use the RBAC system, as it is still possible for an attacker to
69115+ modify the running kernel through other more obscure methods.
69116+
69117+ It is highly recommended that you say Y here if you meet all the
69118+ conditions above.
69119+
69120+config GRKERNSEC_VM86
69121+ bool "Restrict VM86 mode"
69122+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69123+ depends on X86_32
69124+
69125+ help
69126+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
69127+ make use of a special execution mode on 32bit x86 processors called
69128+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
69129+ video cards and will still work with this option enabled. The purpose
69130+ of the option is to prevent exploitation of emulation errors in
69131+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
69132+ Nearly all users should be able to enable this option.
69133+
69134+config GRKERNSEC_IO
69135+ bool "Disable privileged I/O"
69136+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69137+ depends on X86
69138+ select RTC_CLASS
69139+ select RTC_INTF_DEV
69140+ select RTC_DRV_CMOS
69141+
69142+ help
69143+ If you say Y here, all ioperm and iopl calls will return an error.
69144+ Ioperm and iopl can be used to modify the running kernel.
69145+ Unfortunately, some programs need this access to operate properly,
69146+ the most notable of which are XFree86 and hwclock. hwclock can be
69147+ remedied by having RTC support in the kernel, so real-time
69148+ clock support is enabled if this option is enabled, to ensure
69149+ that hwclock operates correctly. If hwclock still does not work,
69150+ either update udev or symlink /dev/rtc to /dev/rtc0.
69151+
69152+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
69153+ you may not be able to boot into a graphical environment with this
69154+ option enabled. In this case, you should use the RBAC system instead.
69155+
69156+config GRKERNSEC_JIT_HARDEN
69157+ bool "Harden BPF JIT against spray attacks"
69158+ default y if GRKERNSEC_CONFIG_AUTO
69159+ depends on BPF_JIT && X86
69160+ help
69161+ If you say Y here, the native code generated by the kernel's Berkeley
69162+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
69163+ attacks that attempt to fit attacker-beneficial instructions in
69164+ 32bit immediate fields of JIT-generated native instructions. The
69165+ attacker will generally aim to cause an unintended instruction sequence
69166+ of JIT-generated native code to execute by jumping into the middle of
69167+ a generated instruction. This feature effectively randomizes the 32bit
69168+ immediate constants present in the generated code to thwart such attacks.
69169+
69170+ If you're using KERNEXEC, it's recommended that you enable this option
69171+ to supplement the hardening of the kernel.
69172+
69173+config GRKERNSEC_PERF_HARDEN
69174+ bool "Disable unprivileged PERF_EVENTS usage by default"
69175+ default y if GRKERNSEC_CONFIG_AUTO
69176+ depends on PERF_EVENTS
69177+ help
69178+ If you say Y here, the range of acceptable values for the
69179+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
69180+ default to a new value: 3. When the sysctl is set to this value, no
69181+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
69182+
69183+ Though PERF_EVENTS can be used legitimately for performance monitoring
69184+ and low-level application profiling, it is forced on regardless of
69185+ configuration, has been at fault for several vulnerabilities, and
69186+ creates new opportunities for side channels and other information leaks.
69187+
69188+ This feature puts PERF_EVENTS into a secure default state and permits
69189+ the administrator to change out of it temporarily if unprivileged
69190+ application profiling is needed.
69191+
69192+config GRKERNSEC_RAND_THREADSTACK
69193+ bool "Insert random gaps between thread stacks"
69194+ default y if GRKERNSEC_CONFIG_AUTO
69195+ depends on PAX_RANDMMAP && !PPC
69196+ help
69197+ If you say Y here, a random-sized gap will be enforced between allocated
69198+ thread stacks. Glibc's NPTL and other threading libraries that
69199+ pass MAP_STACK to the kernel for thread stack allocation are supported.
69200+ The implementation currently provides 8 bits of entropy for the gap.
69201+
69202+ Many distributions do not compile threaded remote services with the
69203+ -fstack-check argument to GCC, causing the variable-sized stack-based
69204+ allocator, alloca(), to not probe the stack on allocation. This
69205+ permits an unbounded alloca() to skip over any guard page and potentially
69206+ modify another thread's stack reliably. An enforced random gap
69207+ reduces the reliability of such an attack and increases the chance
69208+ that such a read/write to another thread's stack instead lands in
69209+ an unmapped area, causing a crash and triggering grsecurity's
69210+ anti-bruteforcing logic.
69211+
69212+config GRKERNSEC_PROC_MEMMAP
69213+ bool "Harden ASLR against information leaks and entropy reduction"
69214+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
69215+ depends on PAX_NOEXEC || PAX_ASLR
69216+ help
69217+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
69218+ give no information about the addresses of its mappings if
69219+ PaX features that rely on random addresses are enabled on the task.
69220+ In addition to sanitizing this information and disabling other
69221+ dangerous sources of information, this option causes reads of sensitive
69222+ /proc/<pid> entries to return 0 when the file descriptor was opened in a
69223+ different task than the one performing the read. Such attempts are logged.
69224+ This option also limits argv/env strings for suid/sgid binaries
69225+ to 512KB to prevent a complete exhaustion of the stack entropy provided
69226+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
69227+ binaries to prevent alternative mmap layouts from being abused.
69228+
69229+ If you use PaX it is essential that you say Y here as it closes up
69230+ several holes that make full ASLR useless locally.
69231+
69232+
69233+config GRKERNSEC_KSTACKOVERFLOW
69234+ bool "Prevent kernel stack overflows"
69235+ default y if GRKERNSEC_CONFIG_AUTO
69236+ depends on !IA64 && 64BIT
69237+ help
69238+ If you say Y here, the kernel's process stacks will be allocated
69239+ with vmalloc instead of the kernel's default allocator. This
69240+ introduces guard pages that in combination with the alloca checking
69241+ of the STACKLEAK feature prevents all forms of kernel process stack
69242+ overflow abuse. Note that this is different from kernel stack
69243+ buffer overflows.
69244+
69245+config GRKERNSEC_BRUTE
69246+ bool "Deter exploit bruteforcing"
69247+ default y if GRKERNSEC_CONFIG_AUTO
69248+ help
69249+ If you say Y here, attempts to bruteforce exploits against forking
69250+ daemons such as apache or sshd, as well as against suid/sgid binaries
69251+ will be deterred. When a child of a forking daemon is killed by PaX
69252+ or crashes due to an illegal instruction or other suspicious signal,
69253+ the parent process will be delayed 30 seconds upon every subsequent
69254+ fork until the administrator is able to assess the situation and
69255+ restart the daemon.
69256+ In the suid/sgid case, the attempt is logged, the user has all their
69257+ existing instances of the suid/sgid binary terminated and will
69258+ be unable to execute any suid/sgid binaries for 15 minutes.
69259+
69260+ It is recommended that you also enable signal logging in the auditing
69261+ section so that logs are generated when a process triggers a suspicious
69262+ signal.
69263+ If the sysctl option is enabled, a sysctl option with name
69264+ "deter_bruteforce" is created.
69265+
69266+config GRKERNSEC_MODHARDEN
69267+ bool "Harden module auto-loading"
69268+ default y if GRKERNSEC_CONFIG_AUTO
69269+ depends on MODULES
69270+ help
69271+ If you say Y here, module auto-loading in response to use of some
69272+ feature implemented by an unloaded module will be restricted to
69273+ root users. Enabling this option helps defend against attacks
69274+ by unprivileged users who abuse the auto-loading behavior to
69275+ cause a vulnerable module to load that is then exploited.
69276+
69277+ If this option prevents a legitimate use of auto-loading for a
69278+ non-root user, the administrator can execute modprobe manually
69279+ with the exact name of the module mentioned in the alert log.
69280+ Alternatively, the administrator can add the module to the list
69281+ of modules loaded at boot by modifying init scripts.
69282+
69283+ Modification of init scripts will most likely be needed on
69284+ Ubuntu servers with encrypted home directory support enabled,
69285+ as the first non-root user logging in will cause the ecb(aes),
69286+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
69287+
69288+config GRKERNSEC_HIDESYM
69289+ bool "Hide kernel symbols"
69290+ default y if GRKERNSEC_CONFIG_AUTO
69291+ select PAX_USERCOPY_SLABS
69292+ help
69293+ If you say Y here, getting information on loaded modules, and
69294+ displaying all kernel symbols through a syscall will be restricted
69295+ to users with CAP_SYS_MODULE. For software compatibility reasons,
69296+ /proc/kallsyms will be restricted to the root user. The RBAC
69297+ system can hide that entry even from root.
69298+
69299+ This option also prevents leaking of kernel addresses through
69300+ several /proc entries.
69301+
69302+ Note that this option is only effective provided the following
69303+ conditions are met:
69304+ 1) The kernel using grsecurity is not precompiled by some distribution
69305+ 2) You have also enabled GRKERNSEC_DMESG
69306+ 3) You are using the RBAC system and hiding other files such as your
69307+ kernel image and System.map. Alternatively, enabling this option
69308+ causes the permissions on /boot, /lib/modules, and the kernel
69309+ source directory to change at compile time to prevent
69310+ reading by non-root users.
69311+ If the above conditions are met, this option will aid in providing a
69312+ useful protection against local kernel exploitation of overflows
69313+ and arbitrary read/write vulnerabilities.
69314+
69315+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
69316+ in addition to this feature.
69317+
69318+config GRKERNSEC_RANDSTRUCT
69319+ bool "Randomize layout of sensitive kernel structures"
69320+ default y if GRKERNSEC_CONFIG_AUTO
69321+ select GRKERNSEC_HIDESYM
69322+ select MODVERSIONS if MODULES
69323+ help
69324+ If you say Y here, the layouts of a number of sensitive kernel
69325+ structures (task, fs, cred, etc) and all structures composed entirely
69326+ of function pointers (aka "ops" structs) will be randomized at compile-time.
69327+ This can introduce the requirement of an additional infoleak
69328+ vulnerability for exploits targeting these structure types.
69329+
69330+ Enabling this feature will introduce some performance impact, slightly
69331+ increase memory usage, and prevent the use of forensic tools like
69332+ Volatility against the system (unless the kernel source tree is left
69333+ uncleaned after kernel installation).
69334+
69335+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
69336+ It remains after a make clean to allow for external modules to be compiled
69337+ with the existing seed and will be removed by a make mrproper or
69338+ make distclean.
69339+
69340+ Note that the implementation requires gcc 4.6.4 or newer. You may need
69341+ to install the supporting headers explicitly in addition to the normal
69342+ gcc package.
69343+
69344+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
69345+ bool "Use cacheline-aware structure randomization"
69346+ depends on GRKERNSEC_RANDSTRUCT
69347+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
69348+ help
69349+ If you say Y here, the RANDSTRUCT randomization will make a best effort
69350+ at restricting randomization to cacheline-sized groups of elements. It
69351+ will further not randomize bitfields in structures. This reduces the
69352+ performance hit of RANDSTRUCT at the cost of weakened randomization.
69353+
69354+config GRKERNSEC_KERN_LOCKOUT
69355+ bool "Active kernel exploit response"
69356+ default y if GRKERNSEC_CONFIG_AUTO
69357+ depends on X86 || ARM || PPC || SPARC
69358+ help
69359+ If you say Y here, when a PaX alert is triggered due to suspicious
69360+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
69361+ or an OOPS occurs due to bad memory accesses, instead of just
69362+ terminating the offending process (and potentially allowing
69363+ a subsequent exploit from the same user), we will take one of two
69364+ actions:
69365+ * If the user was root, we will panic the system
69366+ * If the user was non-root, we will log the attempt, terminate
69367+ all processes owned by the user, then prevent them from creating
69368+ any new processes until the system is restarted
69369+ This deters repeated kernel exploitation/bruteforcing attempts
69370+ and is useful for later forensics.
69371+
69372+config GRKERNSEC_OLD_ARM_USERLAND
69373+ bool "Old ARM userland compatibility"
69374+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
69375+ help
69376+ If you say Y here, stubs of executable code to perform such operations
69377+ as "compare-exchange" will be placed at fixed locations in the ARM vector
69378+ table. This is unfortunately needed for old ARM userland meant to run
69379+ across a wide range of processors. Without this option enabled,
69380+ the get_tls and data memory barrier stubs will be emulated by the kernel,
69381+ which is enough for Linaro userlands or other userlands designed for v6
69382+ and newer ARM CPUs. It's recommended that you try without this option enabled
69383+ first, and only enable it if your userland does not boot (it will likely fail
69384+ at init time).
69385+
69386+endmenu
69387+menu "Role Based Access Control Options"
69388+depends on GRKERNSEC
69389+
69390+config GRKERNSEC_RBAC_DEBUG
69391+ bool
69392+
69393+config GRKERNSEC_NO_RBAC
69394+ bool "Disable RBAC system"
69395+ help
69396+ If you say Y here, the /dev/grsec device will be removed from the kernel,
69397+ preventing the RBAC system from being enabled. You should only say Y
69398+ here if you have no intention of using the RBAC system, so as to prevent
69399+ an attacker with root access from misusing the RBAC system to hide files
69400+ and processes when loadable module support and /dev/[k]mem have been
69401+ locked down.
69402+
69403+config GRKERNSEC_ACL_HIDEKERN
69404+ bool "Hide kernel processes"
69405+ help
69406+ If you say Y here, all kernel threads will be hidden from all
69407+ processes except those whose subject has the "view hidden processes"
69408+ flag.
69409+
69410+config GRKERNSEC_ACL_MAXTRIES
69411+ int "Maximum tries before password lockout"
69412+ default 3
69413+ help
69414+ This option enforces the maximum number of times a user can attempt
69415+ to authorize themselves with the grsecurity RBAC system before being
69416+ denied the ability to attempt authorization again for a specified time.
69417+ The lower the number, the harder it will be to brute-force a password.
69418+
69419+config GRKERNSEC_ACL_TIMEOUT
69420+ int "Time to wait after max password tries, in seconds"
69421+ default 30
69422+ help
69423+ This option specifies the time the user must wait after attempting to
69424+ authorize to the RBAC system with the maximum number of invalid
69425+ passwords. The higher the number, the harder it will be to brute-force
69426+ a password.
69427+
69428+endmenu
69429+menu "Filesystem Protections"
69430+depends on GRKERNSEC
69431+
69432+config GRKERNSEC_PROC
69433+ bool "Proc restrictions"
69434+ default y if GRKERNSEC_CONFIG_AUTO
69435+ help
69436+ If you say Y here, the permissions of the /proc filesystem
69437+ will be altered to enhance system security and privacy. You MUST
69438+ choose either a user only restriction or a user and group restriction.
69439+ Depending upon the option you choose, you can either restrict users to
69440+ see only the processes they themselves run, or designate a group whose
69441+ members can view all processes and files normally restricted to
69442+ root. NOTE: If you're running identd or
69443+ ntpd as a non-root user, you will have to run it as the group you
69444+ specify here.
69445+
69446+config GRKERNSEC_PROC_USER
69447+ bool "Restrict /proc to user only"
69448+ depends on GRKERNSEC_PROC
69449+ help
69450+ If you say Y here, non-root users will only be able to view their own
69451+ processes, and will be restricted from viewing network-related
69452+ information and kernel symbol and module information.
69453+
69454+config GRKERNSEC_PROC_USERGROUP
69455+ bool "Allow special group"
69456+ default y if GRKERNSEC_CONFIG_AUTO
69457+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
69458+ help
69459+ If you say Y here, you will be able to select a group that will be
69460+ able to view all processes and network-related information. If you've
69461+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
69462+ remain hidden. This option is useful if you want to run identd as
69463+ a non-root user. The group you select may also be chosen at boot time
69464+ via "grsec_proc_gid=" on the kernel commandline.
69465+
69466+config GRKERNSEC_PROC_GID
69467+ int "GID for special group"
69468+ depends on GRKERNSEC_PROC_USERGROUP
69469+ default 1001
69470+
69471+config GRKERNSEC_PROC_ADD
69472+ bool "Additional restrictions"
69473+ default y if GRKERNSEC_CONFIG_AUTO
69474+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
69475+ help
69476+ If you say Y here, additional restrictions will be placed on
69477+ /proc that keep normal users from viewing device information and
69478+ slabinfo information that could be useful for exploits.
69479+
69480+config GRKERNSEC_LINK
69481+ bool "Linking restrictions"
69482+ default y if GRKERNSEC_CONFIG_AUTO
69483+ help
69484+ If you say Y here, /tmp race exploits will be prevented, since users
69485+ will no longer be able to follow symlinks owned by other users in
69486+ world-writable +t directories (e.g. /tmp), unless the owner of the
69487+ symlink is the owner of the directory. Users will also not be
69488+ able to hardlink to files they do not own. If the sysctl option is
69489+ enabled, a sysctl option with name "linking_restrictions" is created.
69490+
69491+config GRKERNSEC_SYMLINKOWN
69492+ bool "Kernel-enforced SymlinksIfOwnerMatch"
69493+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69494+ help
69495+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
69496+ that prevents it from being used as a security feature. As Apache
69497+ verifies the symlink by performing a stat() against the target of
69498+ the symlink before it is followed, an attacker can setup a symlink
69499+ to point to a same-owned file, then replace the symlink with one
69500+ that targets another user's file just after Apache "validates" the
69501+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
69502+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
69503+ will be in place for the group you specify. If the sysctl option
69504+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
69505+ created.
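
 As a minimal sketch of that race (function and path names hypothetical),
 consider a server that validates a path with stat() and only then opens
 it; nothing ties the two steps together, which is the gap the
 kernel-enforced check closes by verifying ownership at follow time:

	/* hypothetical two-step validation; illustration only */
	#include <fcntl.h>
	#include <sys/stat.h>
	#include <sys/types.h>
	#include <unistd.h>

	int serve_file(const char *path, uid_t expected_owner)
	{
		struct stat st;

		if (stat(path, &st) < 0 || st.st_uid != expected_owner)
			return -1;	/* the "validation" step */

		/* window: the attacker swaps the symlink here */

		return open(path, O_RDONLY); /* follows wherever the link points NOW */
	}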
69506+
69507+config GRKERNSEC_SYMLINKOWN_GID
69508+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
69509+ depends on GRKERNSEC_SYMLINKOWN
69510+ default 1006
69511+ help
69512+ Setting this GID determines what group kernel-enforced
69513+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
69514+ is enabled, a sysctl option with name "symlinkown_gid" is created.
69515+
69516+config GRKERNSEC_FIFO
69517+ bool "FIFO restrictions"
69518+ default y if GRKERNSEC_CONFIG_AUTO
69519+ help
69520+ If you say Y here, users will not be able to write to FIFOs they don't
69521+ own in world-writable +t directories (e.g. /tmp), unless the owner of
69522+ the FIFO is also the owner of the directory it's held in. If the sysctl
69523+ option is enabled, a sysctl option with name "fifo_restrictions" is
69524+ created.
69525+
69526+config GRKERNSEC_SYSFS_RESTRICT
69527+ bool "Sysfs/debugfs restriction"
69528+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69529+ depends on SYSFS
69530+ help
69531+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
69532+ any filesystem normally mounted under it (e.g. debugfs) will be
69533+ mostly accessible only by root. These filesystems generally provide access
69534+ to hardware and debug information that isn't appropriate for unprivileged
69535+ users of the system. Sysfs and debugfs have also become a large source
69536+ of new vulnerabilities, ranging from infoleaks to local compromise.
69537+ There has been very little oversight with an eye toward security involved
69538+ in adding new exporters of information to these filesystems, so their
69539+ use is discouraged.
69540+ For reasons of compatibility, a few directories have been whitelisted
69541+ for access by non-root users:
69542+ /sys/fs/selinux
69543+ /sys/fs/fuse
69544+ /sys/devices/system/cpu
69545+
69546+config GRKERNSEC_ROFS
69547+ bool "Runtime read-only mount protection"
69548+ depends on SYSCTL
69549+ help
69550+ If you say Y here, a sysctl option with name "romount_protect" will
69551+ be created. By setting this option to 1 at runtime, filesystems
69552+ will be protected in the following ways:
69553+ * No new writable mounts will be allowed
69554+ * Existing read-only mounts won't be able to be remounted read/write
69555+ * Write operations will be denied on all block devices
69556+ This option acts independently of grsec_lock: once it is set to 1,
69557+ it cannot be turned off. Therefore, please be mindful of the resulting
69558+ behavior if this option is enabled in an init script on a read-only
69559+ filesystem.
69560+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
69561+ and GRKERNSEC_IO should be enabled and module loading disabled via
69562+ config or at runtime.
69563+ This feature is mainly intended for secure embedded systems.
69564+
69565+
69566+config GRKERNSEC_DEVICE_SIDECHANNEL
69567+ bool "Eliminate stat/notify-based device sidechannels"
69568+ default y if GRKERNSEC_CONFIG_AUTO
69569+ help
69570+ If you say Y here, timing analyses on block or character
69571+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
69572+ will be thwarted for unprivileged users. If a process without
69573+ CAP_MKNOD stats such a device, the last access and last modify times
69574+ will match the device's create time. No access or modify events
69575+ will be triggered through inotify/dnotify/fanotify for such devices.
69576+ This feature will prevent attacks that may at a minimum
69577+ allow an attacker to determine the administrator's password length.
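
 A sketch of the sort of unprivileged probe this closes off (the device
 path and poll rate are illustrative): with the option enabled, stat()
 reports the device's create time in the access/modify fields, so the
 loop below observes no changes.

	#include <stdio.h>
	#include <sys/stat.h>
	#include <time.h>
	#include <unistd.h>

	int main(void)
	{
		struct stat st;
		time_t last = 0;

		for (;;) {
			/* watch a terminal device's timestamps for activity */
			if (stat("/dev/ptmx", &st) == 0 && st.st_atime != last) {
				last = st.st_atime;
				printf("activity at %ld\n", (long)last);
			}
			usleep(100000);	/* poll ten times per second */
		}
	}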
69578+
69579+config GRKERNSEC_CHROOT
69580+ bool "Chroot jail restrictions"
69581+ default y if GRKERNSEC_CONFIG_AUTO
69582+ help
69583+ If you say Y here, you will be able to choose several options that will
69584+ make breaking out of a chrooted jail much more difficult. If you
69585+ encounter no software incompatibilities with the following options, it
69586+ is recommended that you enable each one.
69587+
69588+ Note that the chroot restrictions are not intended to apply to "chroots"
69589+ to directories that are simple bind mounts of the global root filesystem.
69590+ For several other reasons, a user shouldn't expect any significant
69591+ security by performing such a chroot.
69592+
69593+config GRKERNSEC_CHROOT_MOUNT
69594+ bool "Deny mounts"
69595+ default y if GRKERNSEC_CONFIG_AUTO
69596+ depends on GRKERNSEC_CHROOT
69597+ help
69598+ If you say Y here, processes inside a chroot will not be able to
69599+ mount or remount filesystems. If the sysctl option is enabled, a
69600+ sysctl option with name "chroot_deny_mount" is created.
69601+
69602+config GRKERNSEC_CHROOT_DOUBLE
69603+ bool "Deny double-chroots"
69604+ default y if GRKERNSEC_CONFIG_AUTO
69605+ depends on GRKERNSEC_CHROOT
69606+ help
69607+ If you say Y here, processes inside a chroot will not be able to chroot
69608+ again outside the chroot. This is a widely used method of breaking
69609+ out of a chroot jail and should not be allowed. If the sysctl
69610+ option is enabled, a sysctl option with name
69611+ "chroot_deny_chroot" is created.
69612+
69613+config GRKERNSEC_CHROOT_PIVOT
69614+ bool "Deny pivot_root in chroot"
69615+ default y if GRKERNSEC_CONFIG_AUTO
69616+ depends on GRKERNSEC_CHROOT
69617+ help
69618+ If you say Y here, processes inside a chroot will not be able to use
69619+ a function called pivot_root() that was introduced in Linux 2.3.41. It
69620+ works similarly to chroot in that it changes the root filesystem. This
69621+ function could be misused in a chrooted process to attempt to break out
69622+ of the chroot, and therefore should not be allowed. If the sysctl
69623+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
69624+ created.
69625+
69626+config GRKERNSEC_CHROOT_CHDIR
69627+ bool "Enforce chdir(\"/\") on all chroots"
69628+ default y if GRKERNSEC_CONFIG_AUTO
69629+ depends on GRKERNSEC_CHROOT
69630+ help
69631+ If you say Y here, the current working directory of all newly-chrooted
69632+ applications will be set to the root directory of the chroot.
69633+ The man page on chroot(2) states:
69634+ Note that this call does not change the current working
69635+ directory, so that `.' can be outside the tree rooted at
69636+ `/'. In particular, the super-user can escape from a
69637+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
69638+
69639+ It is recommended that you say Y here, since it's not known to break
69640+ any software. If the sysctl option is enabled, a sysctl option with
69641+ name "chroot_enforce_chdir" is created.
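
 Written out as a minimal sketch (directory name and climb depth are
 arbitrary; it must run as root, as the man page notes), the quoted
 escape looks like this. Enforcing chdir("/") defeats it because ".."
 can then never begin outside the jail:

	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		int i;

		mkdir("foo", 0755);
		chroot("foo");		/* cwd is still OUTSIDE the new root */
		for (i = 0; i < 64; i++)
			chdir("..");	/* climb up to the real root */
		chroot(".");		/* re-root at the real root: escape complete */
		execl("/bin/sh", "sh", (char *)NULL);
		return 1;
	}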
69642+
69643+config GRKERNSEC_CHROOT_CHMOD
69644+ bool "Deny (f)chmod +s"
69645+ default y if GRKERNSEC_CONFIG_AUTO
69646+ depends on GRKERNSEC_CHROOT
69647+ help
69648+ If you say Y here, processes inside a chroot will not be able to chmod
69649+ or fchmod files to make them have suid or sgid bits. This protects
69650+ against another published method of breaking a chroot. If the sysctl
69651+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
69652+ created.
69653+
69654+config GRKERNSEC_CHROOT_FCHDIR
69655+ bool "Deny fchdir and fhandle out of chroot"
69656+ default y if GRKERNSEC_CONFIG_AUTO
69657+ depends on GRKERNSEC_CHROOT
69658+ help
69659+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
69660+ to a file descriptor of the chrooting process that points to a directory
69661+ outside the filesystem will be stopped. Additionally, this option prevents
69662+ use of the recently-created open_by_handle_at() syscall for opening
69663+ files by a guessable "file handle" inside a chroot. If the sysctl
69664+ option is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
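
 For contrast with the chdir variant above, a sketch of the fchdir()
 escape this option stops (again assuming root inside the jail); the
 saved descriptor is the "file descriptor ... that points to a directory
 outside the filesystem":

	#include <fcntl.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		int i;
		int fd = open(".", O_RDONLY | O_DIRECTORY); /* taken before the chroot */

		mkdir("jail", 0700);
		chroot("jail");
		chdir("/");		/* a "well-behaved" chroot... */
		fchdir(fd);		/* ...escaped through the saved descriptor */
		for (i = 0; i < 64; i++)
			chdir("..");
		chroot(".");
		execl("/bin/sh", "sh", (char *)NULL);
		return 1;
	}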
69665+
69666+config GRKERNSEC_CHROOT_MKNOD
69667+ bool "Deny mknod"
69668+ default y if GRKERNSEC_CONFIG_AUTO
69669+ depends on GRKERNSEC_CHROOT
69670+ help
69671+ If you say Y here, processes inside a chroot will not be allowed to
69672+ mknod. The problem with using mknod inside a chroot is that it
69673+ would allow an attacker to create a device entry that is the same
69674+ as one on the physical root of your system, which could be anything
69675+ from the console device to a device for your hard drive (which
69676+ they could then use to wipe the drive or steal data). It is recommended
69677+ that you say Y here, unless you run into software incompatibilities.
69678+ If the sysctl option is enabled, a sysctl option with name
69679+ "chroot_deny_mknod" is created.
69680+
69681+config GRKERNSEC_CHROOT_SHMAT
69682+ bool "Deny shmat() out of chroot"
69683+ default y if GRKERNSEC_CONFIG_AUTO
69684+ depends on GRKERNSEC_CHROOT
69685+ help
69686+ If you say Y here, processes inside a chroot will not be able to attach
69687+ to shared memory segments that were created outside of the chroot jail.
69688+ It is recommended that you say Y here. If the sysctl option is enabled,
69689+ a sysctl option with name "chroot_deny_shmat" is created.
69690+
69691+config GRKERNSEC_CHROOT_UNIX
69692+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
69693+ default y if GRKERNSEC_CONFIG_AUTO
69694+ depends on GRKERNSEC_CHROOT
69695+ help
69696+ If you say Y here, processes inside a chroot will not be able to
69697+ connect to abstract (meaning not belonging to a filesystem) Unix
69698+ domain sockets that were bound outside of a chroot. It is recommended
69699+ that you say Y here. If the sysctl option is enabled, a sysctl option
69700+ with name "chroot_deny_unix" is created.
69701+
69702+config GRKERNSEC_CHROOT_FINDTASK
69703+ bool "Protect outside processes"
69704+ default y if GRKERNSEC_CONFIG_AUTO
69705+ depends on GRKERNSEC_CHROOT
69706+ help
69707+ If you say Y here, processes inside a chroot will not be able to
69708+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
69709+ getsid, or view any process outside of the chroot. If the sysctl
69710+ option is enabled, a sysctl option with name "chroot_findtask" is
69711+ created.
69712+
69713+config GRKERNSEC_CHROOT_NICE
69714+ bool "Restrict priority changes"
69715+ default y if GRKERNSEC_CONFIG_AUTO
69716+ depends on GRKERNSEC_CHROOT
69717+ help
69718+ If you say Y here, processes inside a chroot will not be able to raise
69719+ the priority of processes in the chroot, or alter the priority of
69720+ processes outside the chroot. This provides more security than simply
69721+ removing CAP_SYS_NICE from the process' capability set. If the
69722+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
69723+ is created.
69724+
69725+config GRKERNSEC_CHROOT_SYSCTL
69726+ bool "Deny sysctl writes"
69727+ default y if GRKERNSEC_CONFIG_AUTO
69728+ depends on GRKERNSEC_CHROOT
69729+ help
69730+ If you say Y here, an attacker in a chroot will not be able to
69731+ write to sysctl entries, either by sysctl(2) or through a /proc
69732+ interface. It is strongly recommended that you say Y here. If the
69733+ sysctl option is enabled, a sysctl option with name
69734+ "chroot_deny_sysctl" is created.
69735+
69736+config GRKERNSEC_CHROOT_CAPS
69737+ bool "Capability restrictions"
69738+ default y if GRKERNSEC_CONFIG_AUTO
69739+ depends on GRKERNSEC_CHROOT
69740+ help
69741+ If you say Y here, the capabilities on all processes within a
69742+ chroot jail will be lowered to stop module insertion, raw i/o,
69743+ system and net admin tasks, rebooting the system, modifying immutable
69744+ files, modifying IPC owned by another, and changing the system time.
69745+ This is left as an option because it can break some apps. Disable this
69746+ if your chrooted apps are having problems performing those kinds of
69747+ tasks. If the sysctl option is enabled, a sysctl option with
69748+ name "chroot_caps" is created.
69749+
69750+config GRKERNSEC_CHROOT_INITRD
69751+ bool "Exempt initrd tasks from restrictions"
69752+ default y if GRKERNSEC_CONFIG_AUTO
69753+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
69754+ help
69755+ If you say Y here, tasks started prior to init will be exempted from
69756+ grsecurity's chroot restrictions. This option is mainly meant to
69757+ address Plymouth performing privileged operations unnecessarily
69758+ in a chroot.
69759+
69760+endmenu
69761+menu "Kernel Auditing"
69762+depends on GRKERNSEC
69763+
69764+config GRKERNSEC_AUDIT_GROUP
69765+ bool "Single group for auditing"
69766+ help
69767+ If you say Y here, the exec and chdir logging features will only operate
69768+ on a group you specify. This option is recommended if you only want to
69769+ watch certain users instead of having a large amount of logs from the
69770+ entire system. If the sysctl option is enabled, a sysctl option with
69771+ name "audit_group" is created.
69772+
69773+config GRKERNSEC_AUDIT_GID
69774+ int "GID for auditing"
69775+ depends on GRKERNSEC_AUDIT_GROUP
69776+ default 1007
69777+
69778+config GRKERNSEC_EXECLOG
69779+ bool "Exec logging"
69780+ help
69781+ If you say Y here, all execve() calls will be logged (since the
69782+ other exec*() calls are frontends to execve(), all execution
69783+ will be logged). Useful for shell-servers that like to keep track
69784+ of their users. If the sysctl option is enabled, a sysctl option with
69785+ name "exec_logging" is created.
69786+ WARNING: This option when enabled will produce a LOT of logs, especially
69787+ on an active system.
69788+
69789+config GRKERNSEC_RESLOG
69790+ bool "Resource logging"
69791+ default y if GRKERNSEC_CONFIG_AUTO
69792+ help
69793+ If you say Y here, all attempts to overstep resource limits will
69794+ be logged with the resource name, the requested size, and the current
69795+ limit. It is highly recommended that you say Y here. If the sysctl
69796+ option is enabled, a sysctl option with name "resource_logging" is
69797+ created. If the RBAC system is enabled, the sysctl value is ignored.
69798+
69799+config GRKERNSEC_CHROOT_EXECLOG
69800+ bool "Log execs within chroot"
69801+ help
69802+ If you say Y here, all executions inside a chroot jail will be logged
69803+ to syslog. This can cause a large amount of logs if certain
69804+ applications (e.g. djb's daemontools) are installed on the system, and
69805+ is therefore left as an option. If the sysctl option is enabled, a
69806+ sysctl option with name "chroot_execlog" is created.
69807+
69808+config GRKERNSEC_AUDIT_PTRACE
69809+ bool "Ptrace logging"
69810+ help
69811+ If you say Y here, all attempts to attach to a process via ptrace
69812+ will be logged. If the sysctl option is enabled, a sysctl option
69813+ with name "audit_ptrace" is created.
69814+
69815+config GRKERNSEC_AUDIT_CHDIR
69816+ bool "Chdir logging"
69817+ help
69818+ If you say Y here, all chdir() calls will be logged. If the sysctl
69819+ option is enabled, a sysctl option with name "audit_chdir" is created.
69820+
69821+config GRKERNSEC_AUDIT_MOUNT
69822+ bool "(Un)Mount logging"
69823+ help
69824+ If you say Y here, all mounts and unmounts will be logged. If the
69825+ sysctl option is enabled, a sysctl option with name "audit_mount" is
69826+ created.
69827+
69828+config GRKERNSEC_SIGNAL
69829+ bool "Signal logging"
69830+ default y if GRKERNSEC_CONFIG_AUTO
69831+ help
69832+ If you say Y here, certain important signals will be logged, such as
69833+ SIGSEGV, which will inform you when an error occurs in a program,
69834+ which in some cases may indicate an exploit attempt.
69835+ If the sysctl option is enabled, a sysctl option with name
69836+ "signal_logging" is created.
69837+
69838+config GRKERNSEC_FORKFAIL
69839+ bool "Fork failure logging"
69840+ help
69841+ If you say Y here, all failed fork() attempts will be logged.
69842+ This could suggest a fork bomb, or someone attempting to overstep
69843+ their process limit. If the sysctl option is enabled, a sysctl option
69844+ with name "forkfail_logging" is created.
69845+
69846+config GRKERNSEC_TIME
69847+ bool "Time change logging"
69848+ default y if GRKERNSEC_CONFIG_AUTO
69849+ help
69850+ If you say Y here, any changes of the system clock will be logged.
69851+ If the sysctl option is enabled, a sysctl option with name
69852+ "timechange_logging" is created.
69853+
69854+config GRKERNSEC_PROC_IPADDR
69855+ bool "/proc/<pid>/ipaddr support"
69856+ default y if GRKERNSEC_CONFIG_AUTO
69857+ help
69858+ If you say Y here, a new entry will be added to each /proc/<pid>
69859+ directory that contains the IP address of the person using the task.
69860+ The IP is carried across local TCP and AF_UNIX stream sockets.
69861+ This information can be useful for IDS/IPSes to perform remote response
69862+ to a local attack. The entry is readable by only the owner of the
69863+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
69864+ the RBAC system), and thus does not create privacy concerns.
69865+
69866+config GRKERNSEC_RWXMAP_LOG
69867+ bool "Denied RWX mmap/mprotect logging"
69868+ default y if GRKERNSEC_CONFIG_AUTO
69869+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
69870+ help
69871+ If you say Y here, calls to mmap() and mprotect() with explicit
69872+ usage of PROT_WRITE and PROT_EXEC together will be logged when
69873+ denied by the PAX_MPROTECT feature. This feature will also
69874+ log other problematic scenarios that can occur when PAX_MPROTECT
69875+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
69876+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
69877+ is created.
69878+
69879+endmenu
69880+
69881+menu "Executable Protections"
69882+depends on GRKERNSEC
69883+
69884+config GRKERNSEC_DMESG
69885+ bool "Dmesg(8) restriction"
69886+ default y if GRKERNSEC_CONFIG_AUTO
69887+ help
69888+ If you say Y here, non-root users will not be able to use dmesg(8)
69889+ to view the contents of the kernel's circular log buffer.
69890+ The kernel's log buffer often contains kernel addresses and other
69891+ identifying information useful to an attacker in fingerprinting a
69892+ system for a targeted exploit.
69893+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
69894+ created.
69895+
69896+config GRKERNSEC_HARDEN_PTRACE
69897+ bool "Deter ptrace-based process snooping"
69898+ default y if GRKERNSEC_CONFIG_AUTO
69899+ help
69900+ If you say Y here, TTY sniffers and other malicious monitoring
69901+ programs implemented through ptrace will be defeated. If you
69902+ have been using the RBAC system, this option has already been
69903+ enabled for several years for all users, with the ability to make
69904+ fine-grained exceptions.
69905+
69906+ This option only affects the ability of non-root users to ptrace
69907+ processes that are not a descendant of the ptracing process.
69908+ This means that strace ./binary and gdb ./binary will still work,
69909+ but attaching to arbitrary processes will not. If the sysctl
69910+ option is enabled, a sysctl option with name "harden_ptrace" is
69911+ created.
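
 The distinction can be seen with a minimal attach attempt (argument
 checking trimmed): running a program under strace or gdb traces a
 descendant and still works, while the PTRACE_ATTACH below targets an
 unrelated pid and fails with EPERM for non-root users once
 harden_ptrace is set.

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	int main(int argc, char **argv)
	{
		pid_t target;

		if (argc < 2)
			return 1;
		target = (pid_t)atoi(argv[1]);

		/* not a descendant of this process: denied under harden_ptrace */
		if (ptrace(PTRACE_ATTACH, target, NULL, NULL) < 0)
			perror("PTRACE_ATTACH");
		return 0;
	}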
69912+
69913+config GRKERNSEC_PTRACE_READEXEC
69914+ bool "Require read access to ptrace sensitive binaries"
69915+ default y if GRKERNSEC_CONFIG_AUTO
69916+ help
69917+ If you say Y here, unprivileged users will not be able to ptrace unreadable
69918+ binaries. This option is useful in environments that
69919+ remove the read bits (e.g. file mode 4711) from suid binaries to
69920+ prevent infoleaking of their contents. This option adds
69921+ consistency to the use of that file mode, as without it the binary's
69922+ contents could be read out by ptracing it when run without privileges.
69923+
69924+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
69925+ is created.
69926+
69927+config GRKERNSEC_SETXID
69928+ bool "Enforce consistent multithreaded privileges"
69929+ default y if GRKERNSEC_CONFIG_AUTO
69930+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
69931+ help
69932+ If you say Y here, a change from a root uid to a non-root uid
69933+ in a multithreaded application will cause the resulting uids,
69934+ gids, supplementary groups, and capabilities in that thread
69935+ to be propagated to the other threads of the process. In most
69936+ cases this is unnecessary, as glibc will emulate this behavior
69937+ on behalf of the application. Other libcs do not act in the
69938+ same way, allowing the other threads of the process to continue
69939+ running with root privileges. If the sysctl option is enabled,
69940+ a sysctl option with name "consistent_setxid" is created.
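
 A sketch of the inconsistency being closed, using the raw syscall to
 sidestep glibc's own emulation (the uid value is arbitrary; build with
 -pthread): at the kernel level uids are per-thread, so without
 propagation the worker keeps running as root.

	#include <stdio.h>
	#include <pthread.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static void *worker(void *arg)
	{
		sleep(2);
		printf("worker euid: %d\n", (int)geteuid()); /* may still be 0 */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		sleep(1);
		syscall(SYS_setuid, 1000); /* raw syscall: drops root in THIS thread only */
		pthread_join(t, NULL);
		return 0;
	}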
69941+
69942+config GRKERNSEC_HARDEN_IPC
69943+ bool "Disallow access to overly-permissive IPC objects"
69944+ default y if GRKERNSEC_CONFIG_AUTO
69945+ depends on SYSVIPC
69946+ help
69947+ If you say Y here, access to overly-permissive IPC objects (shared
69948+ memory, message queues, and semaphores) will be denied for processes
69949+ meeting the following criteria, beyond normal permission checks:
69950+ 1) If the IPC object is world-accessible and the euid doesn't match
69951+ that of the creator or current uid for the IPC object
69952+ 2) If the IPC object is group-accessible and the egid doesn't
69953+ match that of the creator or current gid for the IPC object
69954+ It's a common error to grant too much permission to these objects,
69955+ with impact ranging from denial of service and information leaking to
69956+ privilege escalation. This feature was developed in response to
69957+ research by Tim Brown:
69958+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
69959+ who found hundreds of such insecure usages. Processes with
69960+ CAP_IPC_OWNER are still permitted to access these IPC objects.
69961+ If the sysctl option is enabled, a sysctl option with name
69962+ "harden_ipc" is created.
69963+
69964+config GRKERNSEC_TPE
69965+ bool "Trusted Path Execution (TPE)"
69966+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69967+ help
69968+ If you say Y here, you will be able to choose a gid to add to the
69969+ supplementary groups of users you want to mark as "untrusted."
69970+ These users will not be able to execute any files that are not in
69971+ root-owned directories writable only by root. If the sysctl option
69972+ is enabled, a sysctl option with name "tpe" is created.
69973+
69974+config GRKERNSEC_TPE_ALL
69975+ bool "Partially restrict all non-root users"
69976+ depends on GRKERNSEC_TPE
69977+ help
69978+ If you say Y here, all non-root users will be covered under
69979+ a weaker TPE restriction. This is separate from, and in addition to,
69980+ the main TPE options that you have selected elsewhere. Thus, if a
69981+ "trusted" GID is chosen, this restriction applies to even that GID.
69982+ Under this restriction, all non-root users will only be allowed to
69983+ execute files in directories they own that are not group or
69984+ world-writable, or in directories owned by root and writable only by
69985+ root. If the sysctl option is enabled, a sysctl option with name
69986+ "tpe_restrict_all" is created.
69987+
69988+config GRKERNSEC_TPE_INVERT
69989+ bool "Invert GID option"
69990+ depends on GRKERNSEC_TPE
69991+ help
69992+ If you say Y here, the group you specify in the TPE configuration will
69993+ decide what group TPE restrictions will be *disabled* for. This
69994+ option is useful if you want TPE restrictions to be applied to most
69995+ users on the system. If the sysctl option is enabled, a sysctl option
69996+ with name "tpe_invert" is created. Unlike other sysctl options, this
69997+ entry will default to on for backward-compatibility.
69998+
69999+config GRKERNSEC_TPE_GID
70000+ int
70001+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
70002+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
70003+
70004+config GRKERNSEC_TPE_UNTRUSTED_GID
70005+ int "GID for TPE-untrusted users"
70006+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
70007+ default 1005
70008+ help
70009+ Setting this GID determines what group TPE restrictions will be
70010+ *enabled* for. If the sysctl option is enabled, a sysctl option
70011+ with name "tpe_gid" is created.
70012+
70013+config GRKERNSEC_TPE_TRUSTED_GID
70014+ int "GID for TPE-trusted users"
70015+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
70016+ default 1005
70017+ help
70018+ Setting this GID determines what group TPE restrictions will be
70019+ *disabled* for. If the sysctl option is enabled, a sysctl option
70020+ with name "tpe_gid" is created.
70021+
70022+endmenu
70023+menu "Network Protections"
70024+depends on GRKERNSEC
70025+
70026+config GRKERNSEC_BLACKHOLE
70027+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
70028+ default y if GRKERNSEC_CONFIG_AUTO
70029+ depends on NET
70030+ help
70031+ If you say Y here, neither TCP resets nor ICMP
70032+ destination-unreachable packets will be sent in response to packets
70033+ sent to ports for which no associated listening process exists.
70034+ This feature supports both IPv4 and IPv6 and exempts the
70035+ loopback interface from blackholing. Enabling this feature
70036+ makes a host more resilient to DoS attacks and reduces network
70037+ visibility against scanners.
70038+
70039+ The blackhole feature as-implemented is equivalent to the FreeBSD
70040+ blackhole feature, as it prevents RST responses to all packets, not
70041+ just SYNs. Under most application behavior this causes no
70042+ problems, but applications (like haproxy) may not close certain
70043+ connections in a way that cleanly terminates them on the remote
70044+ end, leaving the remote host in LAST_ACK state. Because of this
70045+ side-effect and to prevent intentional LAST_ACK DoSes, this
70046+ feature also adds automatic mitigation against such attacks.
70047+ The mitigation drastically reduces the amount of time a socket
70048+ can spend in LAST_ACK state. If you're using haproxy and not
70049+ all servers it connects to have this option enabled, consider
70050+ disabling this feature on the haproxy host.
70051+
70052+ If the sysctl option is enabled, two sysctl options with names
70053+ "ip_blackhole" and "lastack_retries" will be created.
70054+ While "ip_blackhole" takes the standard zero/non-zero on/off
70055+ toggle, "lastack_retries" uses the same kinds of values as
70056+ "tcp_retries1" and "tcp_retries2". The default value of 4
70057+ prevents a socket from lasting more than 45 seconds in LAST_ACK
70058+ state.
70059+
70060+config GRKERNSEC_NO_SIMULT_CONNECT
70061+ bool "Disable TCP Simultaneous Connect"
70062+ default y if GRKERNSEC_CONFIG_AUTO
70063+ depends on NET
70064+ help
70065+ If you say Y here, a feature by Willy Tarreau will be enabled that
70066+ removes a weakness in Linux's strict implementation of TCP that
70067+ allows two clients to connect to each other without either entering
70068+ a listening state. The weakness allows an attacker to easily prevent
70069+ a client from connecting to a known server provided the source port
70070+ for the connection is guessed correctly.
70071+
70072+ As the weakness could be used to prevent an antivirus or IPS from
70073+ fetching updates, or prevent an SSL gateway from fetching a CRL,
70074+ it should be eliminated by enabling this option. Though Linux is
70075+ one of few operating systems supporting simultaneous connect, it
70076+ has no legitimate use in practice and is rarely supported by firewalls.
70077+
70078+config GRKERNSEC_SOCKET
70079+ bool "Socket restrictions"
70080+ depends on NET
70081+ help
70082+ If you say Y here, you will be able to choose from several options.
70083+ If you assign a GID on your system and add it to the supplementary
70084+ groups of users you want to restrict socket access to, this patch
70085+ will perform up to three things, based on the option(s) you choose.
70086+
70087+config GRKERNSEC_SOCKET_ALL
70088+ bool "Deny any sockets to group"
70089+ depends on GRKERNSEC_SOCKET
70090+ help
70091+ If you say Y here, you will be able to choose a GID whose users will
70092+ be unable to connect to other hosts from your machine or run server
70093+ applications from your machine. If the sysctl option is enabled, a
70094+ sysctl option with name "socket_all" is created.
70095+
70096+config GRKERNSEC_SOCKET_ALL_GID
70097+ int "GID to deny all sockets for"
70098+ depends on GRKERNSEC_SOCKET_ALL
70099+ default 1004
70100+ help
70101+ Here you can choose the GID to disable socket access for. Remember to
70102+ add the users you want socket access disabled for to the GID
70103+ specified here. If the sysctl option is enabled, a sysctl option
70104+ with name "socket_all_gid" is created.
70105+
70106+config GRKERNSEC_SOCKET_CLIENT
70107+ bool "Deny client sockets to group"
70108+ depends on GRKERNSEC_SOCKET
70109+ help
70110+ If you say Y here, you will be able to choose a GID whose users will
70111+ be unable to connect to other hosts from your machine, but will be
70112+ able to run servers. If this option is enabled, all users in the group
70113+ you specify will have to use passive mode when initiating ftp transfers
70114+ from the shell on your machine. If the sysctl option is enabled, a
70115+ sysctl option with name "socket_client" is created.
70116+
70117+config GRKERNSEC_SOCKET_CLIENT_GID
70118+ int "GID to deny client sockets for"
70119+ depends on GRKERNSEC_SOCKET_CLIENT
70120+ default 1003
70121+ help
70122+ Here you can choose the GID to disable client socket access for.
70123+ Remember to add the users you want client socket access disabled for to
70124+ the GID specified here. If the sysctl option is enabled, a sysctl
70125+ option with name "socket_client_gid" is created.
70126+
70127+config GRKERNSEC_SOCKET_SERVER
70128+ bool "Deny server sockets to group"
70129+ depends on GRKERNSEC_SOCKET
70130+ help
70131+ If you say Y here, you will be able to choose a GID whose users will
70132+ be unable to run server applications from your machine. If the sysctl
70133+ option is enabled, a sysctl option with name "socket_server" is created.
70134+
70135+config GRKERNSEC_SOCKET_SERVER_GID
70136+ int "GID to deny server sockets for"
70137+ depends on GRKERNSEC_SOCKET_SERVER
70138+ default 1002
70139+ help
70140+ Here you can choose the GID to disable server socket access for.
70141+ Remember to add the users you want server socket access disabled for to
70142+ the GID specified here. If the sysctl option is enabled, a sysctl
70143+ option with name "socket_server_gid" is created.
70144+
70145+endmenu
70146+
70147+menu "Physical Protections"
70148+depends on GRKERNSEC
70149+
70150+config GRKERNSEC_DENYUSB
70151+ bool "Deny new USB connections after toggle"
70152+ default y if GRKERNSEC_CONFIG_AUTO
70153+ depends on SYSCTL && USB_SUPPORT
70154+ help
70155+ If you say Y here, a new sysctl option with name "deny_new_usb"
70156+ will be created. Setting its value to 1 will prevent any new
70157+ USB devices from being recognized by the OS. Any attempted USB
70158+ device insertion will be logged. This option is intended to be
70159+ used against custom USB devices designed to exploit vulnerabilities
70160+ in various USB device drivers.
70161+
70162+ For greatest effectiveness, this sysctl should be set after any
70163+ relevant init scripts. This option is safe to enable in distros
70164+ as each user can choose whether or not to toggle the sysctl.
70165+
70166+config GRKERNSEC_DENYUSB_FORCE
70167+ bool "Reject all USB devices not connected at boot"
70168+ select USB
70169+ depends on GRKERNSEC_DENYUSB
70170+ help
70171+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
70172+ that doesn't involve a sysctl entry. This option should only be
70173+ enabled if you're sure you want to deny all new USB connections
70174+ at runtime and don't want to modify init scripts. This should not
70175+ be enabled by distros. It forces the core USB code to be built
70176+ into the kernel image so that all devices connected at boot time
70177+ can be recognized and new USB device connections can be prevented
70178+ prior to init running.
70179+
70180+endmenu
70181+
70182+menu "Sysctl Support"
70183+depends on GRKERNSEC && SYSCTL
70184+
70185+config GRKERNSEC_SYSCTL
70186+ bool "Sysctl support"
70187+ default y if GRKERNSEC_CONFIG_AUTO
70188+ help
70189+ If you say Y here, you will be able to change the options that
70190+ grsecurity runs with at bootup, without having to recompile your
70191+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
70192+ to enable (1) or disable (0) various features. All the sysctl entries
70193+ are mutable until the "grsec_lock" entry is set to a non-zero value.
70194+ All features enabled in the kernel configuration are disabled at boot
70195+ if you do not say Y to the "Turn on features by default" option.
70196+ All options should be set at startup, and the grsec_lock entry should
70197+ be set to a non-zero value after all the options are set.
70198+ *THIS IS EXTREMELY IMPORTANT*
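
 The startup sequence this describes amounts to writing the desired
 values and then locking; a sketch in C (the entry names are examples
 and assume the corresponding features were compiled in):

	#include <stdio.h>

	/* write one value to an entry under /proc/sys/kernel/grsecurity */
	static int grsec_set(const char *name, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		grsec_set("deter_bruteforce", "1"); /* set features first... */
		grsec_set("grsec_lock", "1");       /* ...then lock them down */
		return 0;
	}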
70199+
70200+config GRKERNSEC_SYSCTL_DISTRO
70201+ bool "Extra sysctl support for distro makers (READ HELP)"
70202+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
70203+ help
70204+ If you say Y here, additional sysctl options will be created
70205+ for features that affect processes running as root. Therefore,
70206+ it is critical when using this option that the grsec_lock entry be
70207+ enabled after boot. Only distros that ship prebuilt kernel packages
70208+ with this option enabled and that can ensure grsec_lock is enabled
70209+ after boot should use this option.
70210+ *Failure to set grsec_lock after boot makes all grsec features
70211+ this option covers useless*
70212+
70213+ Currently this option creates the following sysctl entries:
70214+ "Disable Privileged I/O": "disable_priv_io"
70215+
70216+config GRKERNSEC_SYSCTL_ON
70217+ bool "Turn on features by default"
70218+ default y if GRKERNSEC_CONFIG_AUTO
70219+ depends on GRKERNSEC_SYSCTL
70220+ help
70221+ If you say Y here, instead of having all features enabled in the
70222+ kernel configuration disabled at boot time, the features will be
70223+ enabled at boot time. It is recommended you say Y here unless
70224+ there is some reason you would want all sysctl-tunable features to
70225+ be disabled by default. As mentioned elsewhere, it is important
70226+ to enable the grsec_lock entry once you have finished modifying
70227+ the sysctl entries.
70228+
70229+endmenu
70230+menu "Logging Options"
70231+depends on GRKERNSEC
70232+
70233+config GRKERNSEC_FLOODTIME
70234+ int "Seconds in between log messages (minimum)"
70235+ default 10
70236+ help
70237+ This option allows you to enforce the number of seconds between
70238+ grsecurity log messages. The default should be suitable for most
70239+ people; however, if you choose to change it, choose a value small enough
70240+ to allow informative logs to be produced, but large enough to
70241+ prevent flooding.
70242+
70243+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
70244+ any rate limiting on grsecurity log messages.
70245+
70246+config GRKERNSEC_FLOODBURST
70247+ int "Number of messages in a burst (maximum)"
70248+ default 6
70249+ help
70250+ This option allows you to choose the maximum number of messages allowed
70251+ within the flood time interval you chose in a separate option. The
70252+ default should be suitable for most people; however, if you find that
70253+ many of your logs are being interpreted as flooding, you may want to
70254+ raise this value.
70255+
70256+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
70257+ any rate limiting on grsecurity log messages.
70258+
70259+endmenu
70260diff --git a/grsecurity/Makefile b/grsecurity/Makefile
70261new file mode 100644
70262index 0000000..30ababb
70263--- /dev/null
70264+++ b/grsecurity/Makefile
70265@@ -0,0 +1,54 @@
70266+# grsecurity - access control and security hardening for Linux
70267+# All code in this directory and various hooks located throughout the Linux kernel are
70268+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
70269+# http://www.grsecurity.net spender@grsecurity.net
70270+#
70271+# This program is free software; you can redistribute it and/or
70272+# modify it under the terms of the GNU General Public License version 2
70273+# as published by the Free Software Foundation.
70274+#
70275+# This program is distributed in the hope that it will be useful,
70276+# but WITHOUT ANY WARRANTY; without even the implied warranty of
70277+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70278+# GNU General Public License for more details.
70279+#
70280+# You should have received a copy of the GNU General Public License
70281+# along with this program; if not, write to the Free Software
70282+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
70283+
70284+KBUILD_CFLAGS += -Werror
70285+
70286+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
70287+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
70288+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
70289+ grsec_usb.o grsec_ipc.o grsec_proc.o
70290+
70291+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
70292+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
70293+ gracl_learn.o grsec_log.o gracl_policy.o
70294+ifdef CONFIG_COMPAT
70295+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
70296+endif
70297+
70298+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
70299+
70300+ifdef CONFIG_NET
70301+obj-y += grsec_sock.o
70302+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
70303+endif
70304+
70305+ifndef CONFIG_GRKERNSEC
70306+obj-y += grsec_disabled.o
70307+endif
70308+
70309+ifdef CONFIG_GRKERNSEC_HIDESYM
70310+extra-y := grsec_hidesym.o
70311+$(obj)/grsec_hidesym.o:
70312+ @-chmod -f 500 /boot
70313+ @-chmod -f 500 /lib/modules
70314+ @-chmod -f 500 /lib64/modules
70315+ @-chmod -f 500 /lib32/modules
70316+ @-chmod -f 700 .
70317+ @-chmod -f 700 $(objtree)
70318+ @echo ' grsec: protected kernel image paths'
70319+endif
70320diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
70321new file mode 100644
70322index 0000000..58223f6
70323--- /dev/null
70324+++ b/grsecurity/gracl.c
70325@@ -0,0 +1,2702 @@
70326+#include <linux/kernel.h>
70327+#include <linux/module.h>
70328+#include <linux/sched.h>
70329+#include <linux/mm.h>
70330+#include <linux/file.h>
70331+#include <linux/fs.h>
70332+#include <linux/namei.h>
70333+#include <linux/mount.h>
70334+#include <linux/tty.h>
70335+#include <linux/proc_fs.h>
70336+#include <linux/lglock.h>
70337+#include <linux/slab.h>
70338+#include <linux/vmalloc.h>
70339+#include <linux/types.h>
70340+#include <linux/sysctl.h>
70341+#include <linux/netdevice.h>
70342+#include <linux/ptrace.h>
70343+#include <linux/gracl.h>
70344+#include <linux/gralloc.h>
70345+#include <linux/security.h>
70346+#include <linux/grinternal.h>
70347+#include <linux/pid_namespace.h>
70348+#include <linux/stop_machine.h>
70349+#include <linux/fdtable.h>
70350+#include <linux/percpu.h>
70351+#include <linux/lglock.h>
70352+#include <linux/hugetlb.h>
70353+#include <linux/posix-timers.h>
70354+#include <linux/prefetch.h>
70355+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70356+#include <linux/magic.h>
70357+#include <linux/pagemap.h>
70358+#include "../fs/btrfs/async-thread.h"
70359+#include "../fs/btrfs/ctree.h"
70360+#include "../fs/btrfs/btrfs_inode.h"
70361+#endif
70362+#include "../fs/mount.h"
70363+
70364+#include <asm/uaccess.h>
70365+#include <asm/errno.h>
70366+#include <asm/mman.h>
70367+
70368+#define FOR_EACH_ROLE_START(role) \
70369+ role = running_polstate.role_list; \
70370+ while (role) {
70371+
70372+#define FOR_EACH_ROLE_END(role) \
70373+ role = role->prev; \
70374+ }
70375+
70376+extern struct path gr_real_root;
70377+
70378+static struct gr_policy_state running_polstate;
70379+struct gr_policy_state *polstate = &running_polstate;
70380+extern struct gr_alloc_state *current_alloc_state;
70381+
70382+extern char *gr_shared_page[4];
70383+DEFINE_RWLOCK(gr_inode_lock);
70384+
70385+static unsigned int gr_status __read_only = GR_STATUS_INIT;
70386+
70387+#ifdef CONFIG_NET
70388+extern struct vfsmount *sock_mnt;
70389+#endif
70390+
70391+extern struct vfsmount *pipe_mnt;
70392+extern struct vfsmount *shm_mnt;
70393+
70394+#ifdef CONFIG_HUGETLBFS
70395+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
70396+#endif
70397+
70398+extern u16 acl_sp_role_value;
70399+extern struct acl_object_label *fakefs_obj_rw;
70400+extern struct acl_object_label *fakefs_obj_rwx;
70401+
70402+int gr_acl_is_enabled(void)
70403+{
70404+ return (gr_status & GR_READY);
70405+}
70406+
70407+void gr_enable_rbac_system(void)
70408+{
70409+ pax_open_kernel();
70410+ gr_status |= GR_READY;
70411+ pax_close_kernel();
70412+}
70413+
70414+int gr_rbac_disable(void *unused)
70415+{
70416+ pax_open_kernel();
70417+ gr_status &= ~GR_READY;
70418+ pax_close_kernel();
70419+
70420+ return 0;
70421+}
70422+
70423+static inline dev_t __get_dev(const struct dentry *dentry)
70424+{
70425+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70426+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
70427+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
70428+ else
70429+#endif
70430+ return dentry->d_sb->s_dev;
70431+}
70432+
70433+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
70434+{
70435+ return __get_dev(dentry);
70436+}
70437+
70438+static char gr_task_roletype_to_char(struct task_struct *task)
70439+{
70440+ switch (task->role->roletype &
70441+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
70442+ GR_ROLE_SPECIAL)) {
70443+ case GR_ROLE_DEFAULT:
70444+ return 'D';
70445+ case GR_ROLE_USER:
70446+ return 'U';
70447+ case GR_ROLE_GROUP:
70448+ return 'G';
70449+ case GR_ROLE_SPECIAL:
70450+ return 'S';
70451+ }
70452+
70453+ return 'X';
70454+}
70455+
70456+char gr_roletype_to_char(void)
70457+{
70458+ return gr_task_roletype_to_char(current);
70459+}
70460+
70461+__inline__ int
70462+gr_acl_tpe_check(void)
70463+{
70464+ if (unlikely(!(gr_status & GR_READY)))
70465+ return 0;
70466+ if (current->role->roletype & GR_ROLE_TPE)
70467+ return 1;
70468+ else
70469+ return 0;
70470+}
70471+
70472+int
70473+gr_handle_rawio(const struct inode *inode)
70474+{
70475+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70476+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
70477+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
70478+ !capable(CAP_SYS_RAWIO))
70479+ return 1;
70480+#endif
70481+ return 0;
70482+}
70483+
70484+int
70485+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
70486+{
70487+ if (likely(lena != lenb))
70488+ return 0;
70489+
70490+ return !memcmp(a, b, lena);
70491+}
70492+
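+/* The path-assembly helpers below build names right to left: the caller
+ * points *buffer at the end of a scratch page, and each prepend() first
+ * subtracts the component length from *buflen and *buffer and then
+ * copies, so components already written never have to move. */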
70493+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
70494+{
70495+ *buflen -= namelen;
70496+ if (*buflen < 0)
70497+ return -ENAMETOOLONG;
70498+ *buffer -= namelen;
70499+ memcpy(*buffer, str, namelen);
70500+ return 0;
70501+}
70502+
70503+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
70504+{
70505+ return prepend(buffer, buflen, name->name, name->len);
70506+}
70507+
70508+static int prepend_path(const struct path *path, struct path *root,
70509+ char **buffer, int *buflen)
70510+{
70511+ struct dentry *dentry = path->dentry;
70512+ struct vfsmount *vfsmnt = path->mnt;
70513+ struct mount *mnt = real_mount(vfsmnt);
70514+ bool slash = false;
70515+ int error = 0;
70516+
70517+ while (dentry != root->dentry || vfsmnt != root->mnt) {
70518+ struct dentry * parent;
70519+
70520+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
70521+ /* Global root? */
70522+ if (!mnt_has_parent(mnt)) {
70523+ goto out;
70524+ }
70525+ dentry = mnt->mnt_mountpoint;
70526+ mnt = mnt->mnt_parent;
70527+ vfsmnt = &mnt->mnt;
70528+ continue;
70529+ }
70530+ parent = dentry->d_parent;
70531+ prefetch(parent);
70532+ spin_lock(&dentry->d_lock);
70533+ error = prepend_name(buffer, buflen, &dentry->d_name);
70534+ spin_unlock(&dentry->d_lock);
70535+ if (!error)
70536+ error = prepend(buffer, buflen, "/", 1);
70537+ if (error)
70538+ break;
70539+
70540+ slash = true;
70541+ dentry = parent;
70542+ }
70543+
70544+out:
70545+ if (!error && !slash)
70546+ error = prepend(buffer, buflen, "/", 1);
70547+
70548+ return error;
70549+}
70550+
70551+/* this must be called with mount_lock and rename_lock held */
70552+
70553+static char *__our_d_path(const struct path *path, struct path *root,
70554+ char *buf, int buflen)
70555+{
70556+ char *res = buf + buflen;
70557+ int error;
70558+
70559+ prepend(&res, &buflen, "\0", 1);
70560+ error = prepend_path(path, root, &res, &buflen);
70561+ if (error)
70562+ return ERR_PTR(error);
70563+
70564+ return res;
70565+}
70566+
70567+static char *
70568+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
70569+{
70570+ char *retval;
70571+
70572+ retval = __our_d_path(path, root, buf, buflen);
70573+ if (unlikely(IS_ERR(retval)))
70574+ retval = strcpy(buf, "<path too long>");
70575+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
70576+ retval[1] = '\0';
70577+
70578+ return retval;
70579+}
70580+
70581+static char *
70582+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70583+ char *buf, int buflen)
70584+{
70585+ struct path path;
70586+ char *res;
70587+
70588+ path.dentry = (struct dentry *)dentry;
70589+ path.mnt = (struct vfsmount *)vfsmnt;
70590+
70591+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
70592+ by the RBAC system */
70593+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
70594+
70595+ return res;
70596+}
70597+
70598+static char *
70599+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70600+ char *buf, int buflen)
70601+{
70602+ char *res;
70603+ struct path path;
70604+ struct path root;
70605+ struct task_struct *reaper = init_pid_ns.child_reaper;
70606+
70607+ path.dentry = (struct dentry *)dentry;
70608+ path.mnt = (struct vfsmount *)vfsmnt;
70609+
70610+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
70611+ get_fs_root(reaper->fs, &root);
70612+
70613+ read_seqlock_excl(&mount_lock);
70614+ write_seqlock(&rename_lock);
70615+ res = gen_full_path(&path, &root, buf, buflen);
70616+ write_sequnlock(&rename_lock);
70617+ read_sequnlock_excl(&mount_lock);
70618+
70619+ path_put(&root);
70620+ return res;
70621+}
70622+
70623+char *
70624+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70625+{
70626+ char *ret;
70627+ read_seqlock_excl(&mount_lock);
70628+ write_seqlock(&rename_lock);
70629+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70630+ PAGE_SIZE);
70631+ write_sequnlock(&rename_lock);
70632+ read_sequnlock_excl(&mount_lock);
70633+ return ret;
70634+}
70635+
70636+static char *
70637+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70638+{
70639+ char *ret;
70640+ char *buf;
70641+ int buflen;
70642+
70643+ read_seqlock_excl(&mount_lock);
70644+ write_seqlock(&rename_lock);
70645+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
70646+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
70647+ buflen = (int)(ret - buf);
70648+ if (buflen >= 5)
70649+ prepend(&ret, &buflen, "/proc", 5);
70650+ else
70651+ ret = strcpy(buf, "<path too long>");
70652+ write_sequnlock(&rename_lock);
70653+ read_sequnlock_excl(&mount_lock);
70654+ return ret;
70655+}
70656+
70657+char *
70658+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
70659+{
70660+	return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
70661+ PAGE_SIZE);
70662+}
70663+
70664+char *
70665+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
70666+{
70667+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
70668+ PAGE_SIZE);
70669+}
70670+
70671+char *
70672+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
70673+{
70674+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
70675+ PAGE_SIZE);
70676+}
70677+
70678+char *
70679+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
70680+{
70681+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
70682+ PAGE_SIZE);
70683+}
70684+
70685+char *
70686+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
70687+{
70688+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
70689+ PAGE_SIZE);
70690+}
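+
+/* gr_to_filename{,1,2,3} above differ only in which per-cpu scratch page
+   they format into, so a single logging call can reference several
+   distinct paths without a later conversion overwriting an earlier one */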
70691+
70692+__inline__ __u32
70693+to_gr_audit(const __u32 reqmode)
70694+{
70695+	/* masks off the audit flags, shifts the remaining permission flags
70696+	   into their auditing counterparts, and adds the special case of
70697+	   append auditing if we're requesting write */
70698+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
70699+}
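+
+/* a small worked example, assuming the grsecurity convention that each
+   GR_AUDIT_* flag is the corresponding permission flag shifted left by
+   10 (e.g. GR_AUDIT_READ == GR_READ << 10):
+
+	to_gr_audit(GR_READ)            == GR_AUDIT_READ
+	to_gr_audit(GR_READ | GR_WRITE) == GR_AUDIT_READ | GR_AUDIT_WRITE
+	                                   | GR_AUDIT_APPEND
+
+   append auditing rides along with write because a granted write
+   implies the ability to append */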
70700+
70701+struct acl_role_label *
70702+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
70703+ const gid_t gid)
70704+{
70705+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
70706+ struct acl_role_label *match;
70707+ struct role_allowed_ip *ipp;
70708+ unsigned int x;
70709+ u32 curr_ip = task->signal->saved_ip;
70710+
70711+ match = state->acl_role_set.r_hash[index];
70712+
70713+ while (match) {
70714+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
70715+ for (x = 0; x < match->domain_child_num; x++) {
70716+ if (match->domain_children[x] == uid)
70717+ goto found;
70718+ }
70719+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
70720+ break;
70721+ match = match->next;
70722+ }
70723+found:
70724+ if (match == NULL) {
70725+ try_group:
70726+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
70727+ match = state->acl_role_set.r_hash[index];
70728+
70729+ while (match) {
70730+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
70731+ for (x = 0; x < match->domain_child_num; x++) {
70732+ if (match->domain_children[x] == gid)
70733+ goto found2;
70734+ }
70735+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
70736+ break;
70737+ match = match->next;
70738+ }
70739+found2:
70740+ if (match == NULL)
70741+ match = state->default_role;
70742+ if (match->allowed_ips == NULL)
70743+ return match;
70744+ else {
70745+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70746+ if (likely
70747+ ((ntohl(curr_ip) & ipp->netmask) ==
70748+ (ntohl(ipp->addr) & ipp->netmask)))
70749+ return match;
70750+ }
70751+ match = state->default_role;
70752+ }
70753+ } else if (match->allowed_ips == NULL) {
70754+ return match;
70755+ } else {
70756+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70757+ if (likely
70758+ ((ntohl(curr_ip) & ipp->netmask) ==
70759+ (ntohl(ipp->addr) & ipp->netmask)))
70760+ return match;
70761+ }
70762+ goto try_group;
70763+ }
70764+
70765+ return match;
70766+}
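+
+/* lookup order implemented above: try a user (or user-domain) role,
+   then a group (or group-domain) role, then the default role.  a role
+   carrying an allowed_ips list is only returned when the task's saved
+   source IP falls within one of the listed subnets; otherwise the
+   search falls through to the next candidate */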
70767+
70768+static struct acl_role_label *
70769+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
70770+ const gid_t gid)
70771+{
70772+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
70773+}
70774+
70775+struct acl_subject_label *
70776+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
70777+ const struct acl_role_label *role)
70778+{
70779+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70780+ struct acl_subject_label *match;
70781+
70782+ match = role->subj_hash[index];
70783+
70784+ while (match && (match->inode != ino || match->device != dev ||
70785+ (match->mode & GR_DELETED))) {
70786+ match = match->next;
70787+ }
70788+
70789+ if (match && !(match->mode & GR_DELETED))
70790+ return match;
70791+ else
70792+ return NULL;
70793+}
70794+
70795+struct acl_subject_label *
70796+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
70797+ const struct acl_role_label *role)
70798+{
70799+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70800+ struct acl_subject_label *match;
70801+
70802+ match = role->subj_hash[index];
70803+
70804+ while (match && (match->inode != ino || match->device != dev ||
70805+ !(match->mode & GR_DELETED))) {
70806+ match = match->next;
70807+ }
70808+
70809+ if (match && (match->mode & GR_DELETED))
70810+ return match;
70811+ else
70812+ return NULL;
70813+}
70814+
70815+static struct acl_object_label *
70816+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
70817+ const struct acl_subject_label *subj)
70818+{
70819+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70820+ struct acl_object_label *match;
70821+
70822+ match = subj->obj_hash[index];
70823+
70824+ while (match && (match->inode != ino || match->device != dev ||
70825+ (match->mode & GR_DELETED))) {
70826+ match = match->next;
70827+ }
70828+
70829+ if (match && !(match->mode & GR_DELETED))
70830+ return match;
70831+ else
70832+ return NULL;
70833+}
70834+
70835+static struct acl_object_label *
70836+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
70837+ const struct acl_subject_label *subj)
70838+{
70839+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70840+ struct acl_object_label *match;
70841+
70842+ match = subj->obj_hash[index];
70843+
70844+ while (match && (match->inode != ino || match->device != dev ||
70845+ !(match->mode & GR_DELETED))) {
70846+ match = match->next;
70847+ }
70848+
70849+ if (match && (match->mode & GR_DELETED))
70850+ return match;
70851+
70852+ match = subj->obj_hash[index];
70853+
70854+ while (match && (match->inode != ino || match->device != dev ||
70855+ (match->mode & GR_DELETED))) {
70856+ match = match->next;
70857+ }
70858+
70859+ if (match && !(match->mode & GR_DELETED))
70860+ return match;
70861+ else
70862+ return NULL;
70863+}
70864+
70865+struct name_entry *
70866+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
70867+{
70868+ unsigned int len = strlen(name);
70869+ unsigned int key = full_name_hash(name, len);
70870+ unsigned int index = key % state->name_set.n_size;
70871+ struct name_entry *match;
70872+
70873+ match = state->name_set.n_hash[index];
70874+
70875+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
70876+ match = match->next;
70877+
70878+ return match;
70879+}
70880+
70881+static struct name_entry *
70882+lookup_name_entry(const char *name)
70883+{
70884+ return __lookup_name_entry(&running_polstate, name);
70885+}
70886+
70887+static struct name_entry *
70888+lookup_name_entry_create(const char *name)
70889+{
70890+ unsigned int len = strlen(name);
70891+ unsigned int key = full_name_hash(name, len);
70892+ unsigned int index = key % running_polstate.name_set.n_size;
70893+ struct name_entry *match;
70894+
70895+ match = running_polstate.name_set.n_hash[index];
70896+
70897+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70898+ !match->deleted))
70899+ match = match->next;
70900+
70901+ if (match && match->deleted)
70902+ return match;
70903+
70904+ match = running_polstate.name_set.n_hash[index];
70905+
70906+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70907+ match->deleted))
70908+ match = match->next;
70909+
70910+ if (match && !match->deleted)
70911+ return match;
70912+ else
70913+ return NULL;
70914+}
70915+
70916+static struct inodev_entry *
70917+lookup_inodev_entry(const ino_t ino, const dev_t dev)
70918+{
70919+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
70920+ struct inodev_entry *match;
70921+
70922+ match = running_polstate.inodev_set.i_hash[index];
70923+
70924+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
70925+ match = match->next;
70926+
70927+ return match;
70928+}
70929+
70930+void
70931+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
70932+{
70933+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
70934+ state->inodev_set.i_size);
70935+ struct inodev_entry **curr;
70936+
70937+ entry->prev = NULL;
70938+
70939+ curr = &state->inodev_set.i_hash[index];
70940+ if (*curr != NULL)
70941+ (*curr)->prev = entry;
70942+
70943+ entry->next = *curr;
70944+ *curr = entry;
70945+
70946+ return;
70947+}
70948+
70949+static void
70950+insert_inodev_entry(struct inodev_entry *entry)
70951+{
70952+ __insert_inodev_entry(&running_polstate, entry);
70953+}
70954+
70955+void
70956+insert_acl_obj_label(struct acl_object_label *obj,
70957+ struct acl_subject_label *subj)
70958+{
70959+ unsigned int index =
70960+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
70961+ struct acl_object_label **curr;
70962+
70963+ obj->prev = NULL;
70964+
70965+ curr = &subj->obj_hash[index];
70966+ if (*curr != NULL)
70967+ (*curr)->prev = obj;
70968+
70969+ obj->next = *curr;
70970+ *curr = obj;
70971+
70972+ return;
70973+}
70974+
70975+void
70976+insert_acl_subj_label(struct acl_subject_label *obj,
70977+ struct acl_role_label *role)
70978+{
70979+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70980+ struct acl_subject_label **curr;
70981+
70982+ obj->prev = NULL;
70983+
70984+ curr = &role->subj_hash[index];
70985+ if (*curr != NULL)
70986+ (*curr)->prev = obj;
70987+
70988+ obj->next = *curr;
70989+ *curr = obj;
70990+
70991+ return;
70992+}
70993+
70994+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
70995+
70996+static int
70997+glob_match(const char *p, const char *n)
70998+{
70999+ char c;
71000+
71001+ while ((c = *p++) != '\0') {
71002+ switch (c) {
71003+ case '?':
71004+ if (*n == '\0')
71005+ return 1;
71006+ else if (*n == '/')
71007+ return 1;
71008+ break;
71009+ case '\\':
71010+ if (*n != c)
71011+ return 1;
71012+ break;
71013+ case '*':
71014+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
71015+ if (*n == '/')
71016+ return 1;
71017+ else if (c == '?') {
71018+ if (*n == '\0')
71019+ return 1;
71020+ else
71021+ ++n;
71022+ }
71023+ }
71024+ if (c == '\0') {
71025+ return 0;
71026+ } else {
71027+ const char *endp;
71028+
71029+ if ((endp = strchr(n, '/')) == NULL)
71030+ endp = n + strlen(n);
71031+
71032+ if (c == '[') {
71033+ for (--p; n < endp; ++n)
71034+ if (!glob_match(p, n))
71035+ return 0;
71036+ } else if (c == '/') {
71037+ while (*n != '\0' && *n != '/')
71038+ ++n;
71039+ if (*n == '/' && !glob_match(p, n + 1))
71040+ return 0;
71041+ } else {
71042+ for (--p; n < endp; ++n)
71043+ if (*n == c && !glob_match(p, n))
71044+ return 0;
71045+ }
71046+
71047+ return 1;
71048+ }
71049+ case '[':
71050+ {
71051+ int not;
71052+ char cold;
71053+
71054+ if (*n == '\0' || *n == '/')
71055+ return 1;
71056+
71057+ not = (*p == '!' || *p == '^');
71058+ if (not)
71059+ ++p;
71060+
71061+ c = *p++;
71062+ for (;;) {
71063+ unsigned char fn = (unsigned char)*n;
71064+
71065+ if (c == '\0')
71066+ return 1;
71067+ else {
71068+ if (c == fn)
71069+ goto matched;
71070+ cold = c;
71071+ c = *p++;
71072+
71073+ if (c == '-' && *p != ']') {
71074+ unsigned char cend = *p++;
71075+
71076+ if (cend == '\0')
71077+ return 1;
71078+
71079+ if (cold <= fn && fn <= cend)
71080+ goto matched;
71081+
71082+ c = *p++;
71083+ }
71084+ }
71085+
71086+ if (c == ']')
71087+ break;
71088+ }
71089+ if (!not)
71090+ return 1;
71091+ break;
71092+ matched:
71093+ while (c != ']') {
71094+ if (c == '\0')
71095+ return 1;
71096+
71097+ c = *p++;
71098+ }
71099+ if (not)
71100+ return 1;
71101+ }
71102+ break;
71103+ default:
71104+ if (c != *n)
71105+ return 1;
71106+ }
71107+
71108+ ++n;
71109+ }
71110+
71111+ if (*n == '\0')
71112+ return 0;
71113+
71114+ if (*n == '/')
71115+ return 0;
71116+
71117+ return 1;
71118+}
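+
+/* a few illustrative calls (a sketch for this implementation, not from
+   the original source):
+
+	glob_match("/dev/tty?", "/dev/tty1")  returns 0 (match: '?' is any
+	                                      single character except '/')
+	glob_match("*.log", "dir/a.log")      returns 1 (no match: an
+	                                      interior '*' stops at '/')
+
+   a '*' that ends the pattern, by contrast, accepts the whole remainder
+   of the name, slashes included (the c == '\0' case above) */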
71119+
71120+static struct acl_object_label *
71121+chk_glob_label(struct acl_object_label *globbed,
71122+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
71123+{
71124+ struct acl_object_label *tmp;
71125+
71126+ if (*path == NULL)
71127+ *path = gr_to_filename_nolock(dentry, mnt);
71128+
71129+ tmp = globbed;
71130+
71131+ while (tmp) {
71132+ if (!glob_match(tmp->filename, *path))
71133+ return tmp;
71134+ tmp = tmp->next;
71135+ }
71136+
71137+ return NULL;
71138+}
71139+
71140+static struct acl_object_label *
71141+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71142+ const ino_t curr_ino, const dev_t curr_dev,
71143+ const struct acl_subject_label *subj, char **path, const int checkglob)
71144+{
71145+ struct acl_subject_label *tmpsubj;
71146+ struct acl_object_label *retval;
71147+ struct acl_object_label *retval2;
71148+
71149+ tmpsubj = (struct acl_subject_label *) subj;
71150+ read_lock(&gr_inode_lock);
71151+ do {
71152+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
71153+ if (retval) {
71154+ if (checkglob && retval->globbed) {
71155+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
71156+ if (retval2)
71157+ retval = retval2;
71158+ }
71159+ break;
71160+ }
71161+ } while ((tmpsubj = tmpsubj->parent_subject));
71162+ read_unlock(&gr_inode_lock);
71163+
71164+ return retval;
71165+}
71166+
71167+static __inline__ struct acl_object_label *
71168+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71169+ struct dentry *curr_dentry,
71170+ const struct acl_subject_label *subj, char **path, const int checkglob)
71171+{
71172+ int newglob = checkglob;
71173+ ino_t inode;
71174+ dev_t device;
71175+
71176+	/* if we aren't yet checking a subdirectory of the original path, don't do glob checking,
71177+	   as we don't want a / * rule to match instead of the / object itself.
71178+	   don't do this for create lookups that call this function, though, since they look up
71179+	   on the parent and thus need globbing checks on all paths
71180+	*/
71181+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
71182+ newglob = GR_NO_GLOB;
71183+
71184+ spin_lock(&curr_dentry->d_lock);
71185+ inode = curr_dentry->d_inode->i_ino;
71186+ device = __get_dev(curr_dentry);
71187+ spin_unlock(&curr_dentry->d_lock);
71188+
71189+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
71190+}
71191+
71192+#ifdef CONFIG_HUGETLBFS
71193+static inline bool
71194+is_hugetlbfs_mnt(const struct vfsmount *mnt)
71195+{
71196+ int i;
71197+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
71198+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
71199+ return true;
71200+ }
71201+
71202+ return false;
71203+}
71204+#endif
71205+
71206+static struct acl_object_label *
71207+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71208+ const struct acl_subject_label *subj, char *path, const int checkglob)
71209+{
71210+ struct dentry *dentry = (struct dentry *) l_dentry;
71211+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71212+ struct mount *real_mnt = real_mount(mnt);
71213+ struct acl_object_label *retval;
71214+ struct dentry *parent;
71215+
71216+ read_seqlock_excl(&mount_lock);
71217+ write_seqlock(&rename_lock);
71218+
71219+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
71220+#ifdef CONFIG_NET
71221+ mnt == sock_mnt ||
71222+#endif
71223+#ifdef CONFIG_HUGETLBFS
71224+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
71225+#endif
71226+ /* ignore Eric Biederman */
71227+ IS_PRIVATE(l_dentry->d_inode))) {
71228+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
71229+ goto out;
71230+ }
71231+
71232+ for (;;) {
71233+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71234+ break;
71235+
71236+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71237+ if (!mnt_has_parent(real_mnt))
71238+ break;
71239+
71240+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71241+ if (retval != NULL)
71242+ goto out;
71243+
71244+ dentry = real_mnt->mnt_mountpoint;
71245+ real_mnt = real_mnt->mnt_parent;
71246+ mnt = &real_mnt->mnt;
71247+ continue;
71248+ }
71249+
71250+ parent = dentry->d_parent;
71251+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71252+ if (retval != NULL)
71253+ goto out;
71254+
71255+ dentry = parent;
71256+ }
71257+
71258+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71259+
71260+ /* gr_real_root is pinned so we don't have to hold a reference */
71261+ if (retval == NULL)
71262+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
71263+out:
71264+ write_sequnlock(&rename_lock);
71265+ read_sequnlock_excl(&mount_lock);
71266+
71267+ BUG_ON(retval == NULL);
71268+
71269+ return retval;
71270+}
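+
+/* in short: the loop above tries the object itself first and then each
+   ancestor toward the root, hopping across mount points via
+   mnt_mountpoint/mnt_parent; if nothing matches by the time
+   gr_real_root is reached, the label attached to the real root is
+   used, so a policy's "/" object acts as the catch-all */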
71271+
71272+static __inline__ struct acl_object_label *
71273+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71274+ const struct acl_subject_label *subj)
71275+{
71276+ char *path = NULL;
71277+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
71278+}
71279+
71280+static __inline__ struct acl_object_label *
71281+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71282+ const struct acl_subject_label *subj)
71283+{
71284+ char *path = NULL;
71285+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
71286+}
71287+
71288+static __inline__ struct acl_object_label *
71289+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71290+ const struct acl_subject_label *subj, char *path)
71291+{
71292+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
71293+}
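+
+/* the three wrappers above pick the globbing policy for __chk_obj_label:
+   GR_REG_GLOB enables glob matching once the walk has moved above the
+   original dentry, GR_NO_GLOB disables globbing entirely, and
+   GR_CREATE_GLOB globs on every component, since create-time lookups
+   start at the parent of the not-yet-existing name */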
71294+
71295+struct acl_subject_label *
71296+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71297+ const struct acl_role_label *role)
71298+{
71299+ struct dentry *dentry = (struct dentry *) l_dentry;
71300+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71301+ struct mount *real_mnt = real_mount(mnt);
71302+ struct acl_subject_label *retval;
71303+ struct dentry *parent;
71304+
71305+ read_seqlock_excl(&mount_lock);
71306+ write_seqlock(&rename_lock);
71307+
71308+ for (;;) {
71309+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71310+ break;
71311+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71312+ if (!mnt_has_parent(real_mnt))
71313+ break;
71314+
71315+ spin_lock(&dentry->d_lock);
71316+ read_lock(&gr_inode_lock);
71317+ retval =
71318+ lookup_acl_subj_label(dentry->d_inode->i_ino,
71319+ __get_dev(dentry), role);
71320+ read_unlock(&gr_inode_lock);
71321+ spin_unlock(&dentry->d_lock);
71322+ if (retval != NULL)
71323+ goto out;
71324+
71325+ dentry = real_mnt->mnt_mountpoint;
71326+ real_mnt = real_mnt->mnt_parent;
71327+ mnt = &real_mnt->mnt;
71328+ continue;
71329+ }
71330+
71331+ spin_lock(&dentry->d_lock);
71332+ read_lock(&gr_inode_lock);
71333+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71334+ __get_dev(dentry), role);
71335+ read_unlock(&gr_inode_lock);
71336+ parent = dentry->d_parent;
71337+ spin_unlock(&dentry->d_lock);
71338+
71339+ if (retval != NULL)
71340+ goto out;
71341+
71342+ dentry = parent;
71343+ }
71344+
71345+ spin_lock(&dentry->d_lock);
71346+ read_lock(&gr_inode_lock);
71347+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71348+ __get_dev(dentry), role);
71349+ read_unlock(&gr_inode_lock);
71350+ spin_unlock(&dentry->d_lock);
71351+
71352+ if (unlikely(retval == NULL)) {
71353+ /* gr_real_root is pinned, we don't need to hold a reference */
71354+ read_lock(&gr_inode_lock);
71355+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
71356+ __get_dev(gr_real_root.dentry), role);
71357+ read_unlock(&gr_inode_lock);
71358+ }
71359+out:
71360+ write_sequnlock(&rename_lock);
71361+ read_sequnlock_excl(&mount_lock);
71362+
71363+ BUG_ON(retval == NULL);
71364+
71365+ return retval;
71366+}
71367+
71368+void
71369+assign_special_role(const char *rolename)
71370+{
71371+ struct acl_object_label *obj;
71372+ struct acl_role_label *r;
71373+ struct acl_role_label *assigned = NULL;
71374+ struct task_struct *tsk;
71375+ struct file *filp;
71376+
71377+ FOR_EACH_ROLE_START(r)
71378+ if (!strcmp(rolename, r->rolename) &&
71379+ (r->roletype & GR_ROLE_SPECIAL)) {
71380+ assigned = r;
71381+ break;
71382+ }
71383+ FOR_EACH_ROLE_END(r)
71384+
71385+ if (!assigned)
71386+ return;
71387+
71388+ read_lock(&tasklist_lock);
71389+ read_lock(&grsec_exec_file_lock);
71390+
71391+ tsk = current->real_parent;
71392+ if (tsk == NULL)
71393+ goto out_unlock;
71394+
71395+ filp = tsk->exec_file;
71396+ if (filp == NULL)
71397+ goto out_unlock;
71398+
71399+ tsk->is_writable = 0;
71400+ tsk->inherited = 0;
71401+
71402+ tsk->acl_sp_role = 1;
71403+ tsk->acl_role_id = ++acl_sp_role_value;
71404+ tsk->role = assigned;
71405+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
71406+
71407+ /* ignore additional mmap checks for processes that are writable
71408+ by the default ACL */
71409+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71410+ if (unlikely(obj->mode & GR_WRITE))
71411+ tsk->is_writable = 1;
71412+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
71413+ if (unlikely(obj->mode & GR_WRITE))
71414+ tsk->is_writable = 1;
71415+
71416+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71417+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
71418+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
71419+#endif
71420+
71421+out_unlock:
71422+ read_unlock(&grsec_exec_file_lock);
71423+ read_unlock(&tasklist_lock);
71424+ return;
71425+}
71426+
71427+
71428+static void
71429+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
71430+{
71431+ struct task_struct *task = current;
71432+ const struct cred *cred = current_cred();
71433+
71434+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
71435+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71436+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71437+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
71438+
71439+ return;
71440+}
71441+
71442+static void
71443+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
71444+{
71445+ struct task_struct *task = current;
71446+ const struct cred *cred = current_cred();
71447+
71448+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71449+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71450+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71451+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
71452+
71453+ return;
71454+}
71455+
71456+static void
71457+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
71458+{
71459+ struct task_struct *task = current;
71460+ const struct cred *cred = current_cred();
71461+
71462+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71463+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71464+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71465+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
71466+
71467+ return;
71468+}
71469+
71470+static void
71471+gr_set_proc_res(struct task_struct *task)
71472+{
71473+ struct acl_subject_label *proc;
71474+ unsigned short i;
71475+
71476+ proc = task->acl;
71477+
71478+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
71479+ return;
71480+
71481+ for (i = 0; i < RLIM_NLIMITS; i++) {
71482+ if (!(proc->resmask & (1U << i)))
71483+ continue;
71484+
71485+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
71486+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
71487+
71488+ if (i == RLIMIT_CPU)
71489+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
71490+ }
71491+
71492+ return;
71493+}
71494+
71495+/* both of the below must be called with
71496+ rcu_read_lock();
71497+ read_lock(&tasklist_lock);
71498+ read_lock(&grsec_exec_file_lock);
71499+*/
71500+
71501+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
71502+{
71503+ char *tmpname;
71504+ struct acl_subject_label *tmpsubj;
71505+ struct file *filp;
71506+ struct name_entry *nmatch;
71507+
71508+ filp = task->exec_file;
71509+ if (filp == NULL)
71510+ return NULL;
71511+
71512+	/* the following applies the correct subject
71513+	   to binaries that were already running when the RBAC system
71514+	   was enabled, where those binaries have been
71515+	   replaced or deleted since they were executed
71516+	   -----
71517+	   when the RBAC system starts, the inode/dev
71518+	   from exec_file will be one the RBAC system
71519+	   is unaware of. It only knows the inode/dev
71520+	   of the file presently on disk, or the absence
71521+	   of it.
71522+	*/
71523+
71524+ if (filename)
71525+ nmatch = __lookup_name_entry(state, filename);
71526+ else {
71527+ preempt_disable();
71528+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
71529+
71530+ nmatch = __lookup_name_entry(state, tmpname);
71531+ preempt_enable();
71532+ }
71533+ tmpsubj = NULL;
71534+ if (nmatch) {
71535+ if (nmatch->deleted)
71536+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
71537+ else
71538+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
71539+ }
71540+ /* this also works for the reload case -- if we don't match a potentially inherited subject
71541+ then we fall back to a normal lookup based on the binary's ino/dev
71542+ */
71543+ if (tmpsubj == NULL)
71544+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
71545+
71546+ return tmpsubj;
71547+}
71548+
71549+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
71550+{
71551+ return __gr_get_subject_for_task(&running_polstate, task, filename);
71552+}
71553+
71554+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
71555+{
71556+ struct acl_object_label *obj;
71557+ struct file *filp;
71558+
71559+ filp = task->exec_file;
71560+
71561+ task->acl = subj;
71562+ task->is_writable = 0;
71563+ /* ignore additional mmap checks for processes that are writable
71564+ by the default ACL */
71565+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
71566+ if (unlikely(obj->mode & GR_WRITE))
71567+ task->is_writable = 1;
71568+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71569+ if (unlikely(obj->mode & GR_WRITE))
71570+ task->is_writable = 1;
71571+
71572+ gr_set_proc_res(task);
71573+
71574+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71575+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71576+#endif
71577+}
71578+
71579+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
71580+{
71581+ __gr_apply_subject_to_task(&running_polstate, task, subj);
71582+}
71583+
71584+__u32
71585+gr_search_file(const struct dentry * dentry, const __u32 mode,
71586+ const struct vfsmount * mnt)
71587+{
71588+ __u32 retval = mode;
71589+ struct acl_subject_label *curracl;
71590+ struct acl_object_label *currobj;
71591+
71592+ if (unlikely(!(gr_status & GR_READY)))
71593+ return (mode & ~GR_AUDITS);
71594+
71595+ curracl = current->acl;
71596+
71597+ currobj = chk_obj_label(dentry, mnt, curracl);
71598+ retval = currobj->mode & mode;
71599+
71600+ /* if we're opening a specified transfer file for writing
71601+ (e.g. /dev/initctl), then transfer our role to init
71602+ */
71603+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
71604+ current->role->roletype & GR_ROLE_PERSIST)) {
71605+ struct task_struct *task = init_pid_ns.child_reaper;
71606+
71607+ if (task->role != current->role) {
71608+ struct acl_subject_label *subj;
71609+
71610+ task->acl_sp_role = 0;
71611+ task->acl_role_id = current->acl_role_id;
71612+ task->role = current->role;
71613+ rcu_read_lock();
71614+ read_lock(&grsec_exec_file_lock);
71615+ subj = gr_get_subject_for_task(task, NULL);
71616+ gr_apply_subject_to_task(task, subj);
71617+ read_unlock(&grsec_exec_file_lock);
71618+ rcu_read_unlock();
71619+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
71620+ }
71621+ }
71622+
71623+ if (unlikely
71624+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
71625+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
71626+ __u32 new_mode = mode;
71627+
71628+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71629+
71630+ retval = new_mode;
71631+
71632+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
71633+ new_mode |= GR_INHERIT;
71634+
71635+ if (!(mode & GR_NOLEARN))
71636+ gr_log_learn(dentry, mnt, new_mode);
71637+ }
71638+
71639+ return retval;
71640+}
71641+
71642+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
71643+ const struct dentry *parent,
71644+ const struct vfsmount *mnt)
71645+{
71646+ struct name_entry *match;
71647+ struct acl_object_label *matchpo;
71648+ struct acl_subject_label *curracl;
71649+ char *path;
71650+
71651+ if (unlikely(!(gr_status & GR_READY)))
71652+ return NULL;
71653+
71654+ preempt_disable();
71655+ path = gr_to_filename_rbac(new_dentry, mnt);
71656+ match = lookup_name_entry_create(path);
71657+
71658+ curracl = current->acl;
71659+
71660+ if (match) {
71661+ read_lock(&gr_inode_lock);
71662+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
71663+ read_unlock(&gr_inode_lock);
71664+
71665+ if (matchpo) {
71666+ preempt_enable();
71667+ return matchpo;
71668+ }
71669+ }
71670+
71671+ // lookup parent
71672+
71673+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
71674+
71675+ preempt_enable();
71676+ return matchpo;
71677+}
71678+
71679+__u32
71680+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
71681+ const struct vfsmount * mnt, const __u32 mode)
71682+{
71683+ struct acl_object_label *matchpo;
71684+ __u32 retval;
71685+
71686+ if (unlikely(!(gr_status & GR_READY)))
71687+ return (mode & ~GR_AUDITS);
71688+
71689+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
71690+
71691+ retval = matchpo->mode & mode;
71692+
71693+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
71694+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71695+ __u32 new_mode = mode;
71696+
71697+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71698+
71699+ gr_log_learn(new_dentry, mnt, new_mode);
71700+ return new_mode;
71701+ }
71702+
71703+ return retval;
71704+}
71705+
71706+__u32
71707+gr_check_link(const struct dentry * new_dentry,
71708+ const struct dentry * parent_dentry,
71709+ const struct vfsmount * parent_mnt,
71710+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
71711+{
71712+ struct acl_object_label *obj;
71713+ __u32 oldmode, newmode;
71714+ __u32 needmode;
71715+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
71716+ GR_DELETE | GR_INHERIT;
71717+
71718+ if (unlikely(!(gr_status & GR_READY)))
71719+ return (GR_CREATE | GR_LINK);
71720+
71721+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
71722+ oldmode = obj->mode;
71723+
71724+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
71725+ newmode = obj->mode;
71726+
71727+ needmode = newmode & checkmodes;
71728+
71729+ // old name for hardlink must have at least the permissions of the new name
71730+ if ((oldmode & needmode) != needmode)
71731+ goto bad;
71732+
71733+ // if old name had restrictions/auditing, make sure the new name does as well
71734+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
71735+
71736+ // don't allow hardlinking of suid/sgid/fcapped files without permission
71737+ if (is_privileged_binary(old_dentry))
71738+ needmode |= GR_SETID;
71739+
71740+ if ((newmode & needmode) != needmode)
71741+ goto bad;
71742+
71743+ // enforce minimum permissions
71744+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
71745+ return newmode;
71746+bad:
71747+ needmode = oldmode;
71748+ if (is_privileged_binary(old_dentry))
71749+ needmode |= GR_SETID;
71750+
71751+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
71752+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
71753+ return (GR_CREATE | GR_LINK);
71754+ } else if (newmode & GR_SUPPRESS)
71755+ return GR_SUPPRESS;
71756+ else
71757+ return 0;
71758+}
71759+
71760+int
71761+gr_check_hidden_task(const struct task_struct *task)
71762+{
71763+ if (unlikely(!(gr_status & GR_READY)))
71764+ return 0;
71765+
71766+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
71767+ return 1;
71768+
71769+ return 0;
71770+}
71771+
71772+int
71773+gr_check_protected_task(const struct task_struct *task)
71774+{
71775+ if (unlikely(!(gr_status & GR_READY) || !task))
71776+ return 0;
71777+
71778+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71779+ task->acl != current->acl)
71780+ return 1;
71781+
71782+ return 0;
71783+}
71784+
71785+int
71786+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
71787+{
71788+ struct task_struct *p;
71789+ int ret = 0;
71790+
71791+ if (unlikely(!(gr_status & GR_READY) || !pid))
71792+ return ret;
71793+
71794+ read_lock(&tasklist_lock);
71795+ do_each_pid_task(pid, type, p) {
71796+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71797+ p->acl != current->acl) {
71798+ ret = 1;
71799+ goto out;
71800+ }
71801+ } while_each_pid_task(pid, type, p);
71802+out:
71803+ read_unlock(&tasklist_lock);
71804+
71805+ return ret;
71806+}
71807+
71808+void
71809+gr_copy_label(struct task_struct *tsk)
71810+{
71811+ struct task_struct *p = current;
71812+
71813+ tsk->inherited = p->inherited;
71814+ tsk->acl_sp_role = 0;
71815+ tsk->acl_role_id = p->acl_role_id;
71816+ tsk->acl = p->acl;
71817+ tsk->role = p->role;
71818+ tsk->signal->used_accept = 0;
71819+ tsk->signal->curr_ip = p->signal->curr_ip;
71820+ tsk->signal->saved_ip = p->signal->saved_ip;
71821+ if (p->exec_file)
71822+ get_file(p->exec_file);
71823+ tsk->exec_file = p->exec_file;
71824+ tsk->is_writable = p->is_writable;
71825+ if (unlikely(p->signal->used_accept)) {
71826+ p->signal->curr_ip = 0;
71827+ p->signal->saved_ip = 0;
71828+ }
71829+
71830+ return;
71831+}
71832+
71833+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
71834+
71835+int
71836+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
71837+{
71838+ unsigned int i;
71839+ __u16 num;
71840+ uid_t *uidlist;
71841+ uid_t curuid;
71842+ int realok = 0;
71843+ int effectiveok = 0;
71844+ int fsok = 0;
71845+ uid_t globalreal, globaleffective, globalfs;
71846+
71847+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
71848+ struct user_struct *user;
71849+
71850+ if (!uid_valid(real))
71851+ goto skipit;
71852+
71853+ /* find user based on global namespace */
71854+
71855+ globalreal = GR_GLOBAL_UID(real);
71856+
71857+ user = find_user(make_kuid(&init_user_ns, globalreal));
71858+ if (user == NULL)
71859+ goto skipit;
71860+
71861+ if (gr_process_kernel_setuid_ban(user)) {
71862+ /* for find_user */
71863+ free_uid(user);
71864+ return 1;
71865+ }
71866+
71867+ /* for find_user */
71868+ free_uid(user);
71869+
71870+skipit:
71871+#endif
71872+
71873+ if (unlikely(!(gr_status & GR_READY)))
71874+ return 0;
71875+
71876+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71877+ gr_log_learn_uid_change(real, effective, fs);
71878+
71879+ num = current->acl->user_trans_num;
71880+ uidlist = current->acl->user_transitions;
71881+
71882+ if (uidlist == NULL)
71883+ return 0;
71884+
71885+ if (!uid_valid(real)) {
71886+ realok = 1;
71887+ globalreal = (uid_t)-1;
71888+ } else {
71889+ globalreal = GR_GLOBAL_UID(real);
71890+ }
71891+ if (!uid_valid(effective)) {
71892+ effectiveok = 1;
71893+ globaleffective = (uid_t)-1;
71894+ } else {
71895+ globaleffective = GR_GLOBAL_UID(effective);
71896+ }
71897+ if (!uid_valid(fs)) {
71898+ fsok = 1;
71899+ globalfs = (uid_t)-1;
71900+ } else {
71901+ globalfs = GR_GLOBAL_UID(fs);
71902+ }
71903+
71904+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
71905+ for (i = 0; i < num; i++) {
71906+ curuid = uidlist[i];
71907+ if (globalreal == curuid)
71908+ realok = 1;
71909+ if (globaleffective == curuid)
71910+ effectiveok = 1;
71911+ if (globalfs == curuid)
71912+ fsok = 1;
71913+ }
71914+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
71915+ for (i = 0; i < num; i++) {
71916+ curuid = uidlist[i];
71917+ if (globalreal == curuid)
71918+ break;
71919+ if (globaleffective == curuid)
71920+ break;
71921+ if (globalfs == curuid)
71922+ break;
71923+ }
71924+ /* not in deny list */
71925+ if (i == num) {
71926+ realok = 1;
71927+ effectiveok = 1;
71928+ fsok = 1;
71929+ }
71930+ }
71931+
71932+ if (realok && effectiveok && fsok)
71933+ return 0;
71934+ else {
71935+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71936+ return 1;
71937+ }
71938+}
71939+
71940+int
71941+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
71942+{
71943+ unsigned int i;
71944+ __u16 num;
71945+ gid_t *gidlist;
71946+ gid_t curgid;
71947+ int realok = 0;
71948+ int effectiveok = 0;
71949+ int fsok = 0;
71950+ gid_t globalreal, globaleffective, globalfs;
71951+
71952+ if (unlikely(!(gr_status & GR_READY)))
71953+ return 0;
71954+
71955+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71956+ gr_log_learn_gid_change(real, effective, fs);
71957+
71958+ num = current->acl->group_trans_num;
71959+ gidlist = current->acl->group_transitions;
71960+
71961+ if (gidlist == NULL)
71962+ return 0;
71963+
71964+ if (!gid_valid(real)) {
71965+ realok = 1;
71966+ globalreal = (gid_t)-1;
71967+ } else {
71968+ globalreal = GR_GLOBAL_GID(real);
71969+ }
71970+ if (!gid_valid(effective)) {
71971+ effectiveok = 1;
71972+ globaleffective = (gid_t)-1;
71973+ } else {
71974+ globaleffective = GR_GLOBAL_GID(effective);
71975+ }
71976+ if (!gid_valid(fs)) {
71977+ fsok = 1;
71978+ globalfs = (gid_t)-1;
71979+ } else {
71980+ globalfs = GR_GLOBAL_GID(fs);
71981+ }
71982+
71983+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71984+ for (i = 0; i < num; i++) {
71985+ curgid = gidlist[i];
71986+ if (globalreal == curgid)
71987+ realok = 1;
71988+ if (globaleffective == curgid)
71989+ effectiveok = 1;
71990+ if (globalfs == curgid)
71991+ fsok = 1;
71992+ }
71993+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71994+ for (i = 0; i < num; i++) {
71995+ curgid = gidlist[i];
71996+ if (globalreal == curgid)
71997+ break;
71998+ if (globaleffective == curgid)
71999+ break;
72000+ if (globalfs == curgid)
72001+ break;
72002+ }
72003+ /* not in deny list */
72004+ if (i == num) {
72005+ realok = 1;
72006+ effectiveok = 1;
72007+ fsok = 1;
72008+ }
72009+ }
72010+
72011+ if (realok && effectiveok && fsok)
72012+ return 0;
72013+ else {
72014+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72015+ return 1;
72016+ }
72017+}
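+
+/* return convention shared by gr_check_user_change and
+   gr_check_group_change above (a reading of the code, not original
+   documentation): 0 permits the id transition, 1 denies it after
+   logging.  a GR_ID_ALLOW list is a whitelist that each of the
+   real/effective/fs ids must appear on; a GR_ID_DENY list rejects the
+   transition if any of the three appears on it */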
72018+
72019+extern int gr_acl_is_capable(const int cap);
72020+
72021+void
72022+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
72023+{
72024+ struct acl_role_label *role = task->role;
72025+ struct acl_subject_label *subj = NULL;
72026+ struct acl_object_label *obj;
72027+ struct file *filp;
72028+ uid_t uid;
72029+ gid_t gid;
72030+
72031+ if (unlikely(!(gr_status & GR_READY)))
72032+ return;
72033+
72034+ uid = GR_GLOBAL_UID(kuid);
72035+ gid = GR_GLOBAL_GID(kgid);
72036+
72037+ filp = task->exec_file;
72038+
72039+ /* kernel process, we'll give them the kernel role */
72040+ if (unlikely(!filp)) {
72041+ task->role = running_polstate.kernel_role;
72042+ task->acl = running_polstate.kernel_role->root_label;
72043+ return;
72044+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
72045+ /* save the current ip at time of role lookup so that the proper
72046+ IP will be learned for role_allowed_ip */
72047+ task->signal->saved_ip = task->signal->curr_ip;
72048+ role = lookup_acl_role_label(task, uid, gid);
72049+ }
72050+
72051+ /* don't change the role if we're not a privileged process */
72052+ if (role && task->role != role &&
72053+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
72054+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
72055+ return;
72056+
72057+	/* perform the subject lookup in the possibly new role;
72058+	   we can reuse this result below in the case where role == task->role
72059+	   */
72060+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
72061+
72062+	/* if we changed uid/gid but ended up in the same role
72063+	   and are using inheritance, don't lose the inherited subject:
72064+	   if the current subject differs from what a normal lookup
72065+	   would produce, we arrived at it via inheritance, so don't
72066+	   lose that subject
72067+	*/
72068+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
72069+ (subj == task->acl)))
72070+ task->acl = subj;
72071+
72072+ /* leave task->inherited unaffected */
72073+
72074+ task->role = role;
72075+
72076+ task->is_writable = 0;
72077+
72078+ /* ignore additional mmap checks for processes that are writable
72079+ by the default ACL */
72080+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72081+ if (unlikely(obj->mode & GR_WRITE))
72082+ task->is_writable = 1;
72083+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72084+ if (unlikely(obj->mode & GR_WRITE))
72085+ task->is_writable = 1;
72086+
72087+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72088+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72089+#endif
72090+
72091+ gr_set_proc_res(task);
72092+
72093+ return;
72094+}
72095+
72096+int
72097+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72098+ const int unsafe_flags)
72099+{
72100+ struct task_struct *task = current;
72101+ struct acl_subject_label *newacl;
72102+ struct acl_object_label *obj;
72103+ __u32 retmode;
72104+
72105+ if (unlikely(!(gr_status & GR_READY)))
72106+ return 0;
72107+
72108+ newacl = chk_subj_label(dentry, mnt, task->role);
72109+
72110+	/* special handling for the case where we did an strace -f -p <pid> from an admin role
72111+	   and the traced pid then did an exec
72112+	*/
72113+ rcu_read_lock();
72114+ read_lock(&tasklist_lock);
72115+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
72116+ (task->parent->acl->mode & GR_POVERRIDE))) {
72117+ read_unlock(&tasklist_lock);
72118+ rcu_read_unlock();
72119+ goto skip_check;
72120+ }
72121+ read_unlock(&tasklist_lock);
72122+ rcu_read_unlock();
72123+
72124+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
72125+ !(task->role->roletype & GR_ROLE_GOD) &&
72126+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
72127+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72128+ if (unsafe_flags & LSM_UNSAFE_SHARE)
72129+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
72130+ else
72131+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
72132+ return -EACCES;
72133+ }
72134+
72135+skip_check:
72136+
72137+ obj = chk_obj_label(dentry, mnt, task->acl);
72138+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
72139+
72140+ if (!(task->acl->mode & GR_INHERITLEARN) &&
72141+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
72142+ if (obj->nested)
72143+ task->acl = obj->nested;
72144+ else
72145+ task->acl = newacl;
72146+ task->inherited = 0;
72147+ } else {
72148+ task->inherited = 1;
72149+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
72150+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
72151+ }
72152+
72153+ task->is_writable = 0;
72154+
72155+ /* ignore additional mmap checks for processes that are writable
72156+ by the default ACL */
72157+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
72158+ if (unlikely(obj->mode & GR_WRITE))
72159+ task->is_writable = 1;
72160+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
72161+ if (unlikely(obj->mode & GR_WRITE))
72162+ task->is_writable = 1;
72163+
72164+ gr_set_proc_res(task);
72165+
72166+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72167+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72168+#endif
72169+ return 0;
72170+}
72171+
72172+/* always called with valid inodev ptr */
72173+static void
72174+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
72175+{
72176+ struct acl_object_label *matchpo;
72177+ struct acl_subject_label *matchps;
72178+ struct acl_subject_label *subj;
72179+ struct acl_role_label *role;
72180+ unsigned int x;
72181+
72182+ FOR_EACH_ROLE_START(role)
72183+ FOR_EACH_SUBJECT_START(role, subj, x)
72184+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72185+ matchpo->mode |= GR_DELETED;
72186+ FOR_EACH_SUBJECT_END(subj,x)
72187+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72188+ /* nested subjects aren't in the role's subj_hash table */
72189+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72190+ matchpo->mode |= GR_DELETED;
72191+ FOR_EACH_NESTED_SUBJECT_END(subj)
72192+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
72193+ matchps->mode |= GR_DELETED;
72194+ FOR_EACH_ROLE_END(role)
72195+
72196+ inodev->nentry->deleted = 1;
72197+
72198+ return;
72199+}
72200+
72201+void
72202+gr_handle_delete(const ino_t ino, const dev_t dev)
72203+{
72204+ struct inodev_entry *inodev;
72205+
72206+ if (unlikely(!(gr_status & GR_READY)))
72207+ return;
72208+
72209+ write_lock(&gr_inode_lock);
72210+ inodev = lookup_inodev_entry(ino, dev);
72211+ if (inodev != NULL)
72212+ do_handle_delete(inodev, ino, dev);
72213+ write_unlock(&gr_inode_lock);
72214+
72215+ return;
72216+}
72217+
72218+static void
72219+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
72220+ const ino_t newinode, const dev_t newdevice,
72221+ struct acl_subject_label *subj)
72222+{
72223+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
72224+ struct acl_object_label *match;
72225+
72226+ match = subj->obj_hash[index];
72227+
72228+ while (match && (match->inode != oldinode ||
72229+ match->device != olddevice ||
72230+ !(match->mode & GR_DELETED)))
72231+ match = match->next;
72232+
72233+ if (match && (match->inode == oldinode)
72234+ && (match->device == olddevice)
72235+ && (match->mode & GR_DELETED)) {
72236+ if (match->prev == NULL) {
72237+ subj->obj_hash[index] = match->next;
72238+ if (match->next != NULL)
72239+ match->next->prev = NULL;
72240+ } else {
72241+ match->prev->next = match->next;
72242+ if (match->next != NULL)
72243+ match->next->prev = match->prev;
72244+ }
72245+ match->prev = NULL;
72246+ match->next = NULL;
72247+ match->inode = newinode;
72248+ match->device = newdevice;
72249+ match->mode &= ~GR_DELETED;
72250+
72251+ insert_acl_obj_label(match, subj);
72252+ }
72253+
72254+ return;
72255+}
72256+
72257+static void
72258+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
72259+ const ino_t newinode, const dev_t newdevice,
72260+ struct acl_role_label *role)
72261+{
72262+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
72263+ struct acl_subject_label *match;
72264+
72265+ match = role->subj_hash[index];
72266+
72267+ while (match && (match->inode != oldinode ||
72268+ match->device != olddevice ||
72269+ !(match->mode & GR_DELETED)))
72270+ match = match->next;
72271+
72272+ if (match && (match->inode == oldinode)
72273+ && (match->device == olddevice)
72274+ && (match->mode & GR_DELETED)) {
72275+ if (match->prev == NULL) {
72276+ role->subj_hash[index] = match->next;
72277+ if (match->next != NULL)
72278+ match->next->prev = NULL;
72279+ } else {
72280+ match->prev->next = match->next;
72281+ if (match->next != NULL)
72282+ match->next->prev = match->prev;
72283+ }
72284+ match->prev = NULL;
72285+ match->next = NULL;
72286+ match->inode = newinode;
72287+ match->device = newdevice;
72288+ match->mode &= ~GR_DELETED;
72289+
72290+ insert_acl_subj_label(match, role);
72291+ }
72292+
72293+ return;
72294+}
72295+
72296+static void
72297+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
72298+ const ino_t newinode, const dev_t newdevice)
72299+{
72300+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
72301+ struct inodev_entry *match;
72302+
72303+ match = running_polstate.inodev_set.i_hash[index];
72304+
72305+ while (match && (match->nentry->inode != oldinode ||
72306+ match->nentry->device != olddevice || !match->nentry->deleted))
72307+ match = match->next;
72308+
72309+ if (match && (match->nentry->inode == oldinode)
72310+ && (match->nentry->device == olddevice) &&
72311+ match->nentry->deleted) {
72312+ if (match->prev == NULL) {
72313+ running_polstate.inodev_set.i_hash[index] = match->next;
72314+ if (match->next != NULL)
72315+ match->next->prev = NULL;
72316+ } else {
72317+ match->prev->next = match->next;
72318+ if (match->next != NULL)
72319+ match->next->prev = match->prev;
72320+ }
72321+ match->prev = NULL;
72322+ match->next = NULL;
72323+ match->nentry->inode = newinode;
72324+ match->nentry->device = newdevice;
72325+ match->nentry->deleted = 0;
72326+
72327+ insert_inodev_entry(match);
72328+ }
72329+
72330+ return;
72331+}
72332+
72333+static void
72334+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
72335+{
72336+ struct acl_subject_label *subj;
72337+ struct acl_role_label *role;
72338+ unsigned int x;
72339+
72340+ FOR_EACH_ROLE_START(role)
72341+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
72342+
72343+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72344+		if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
72345+ subj->inode = ino;
72346+ subj->device = dev;
72347+ }
72348+ /* nested subjects aren't in the role's subj_hash table */
72349+ update_acl_obj_label(matchn->inode, matchn->device,
72350+ ino, dev, subj);
72351+ FOR_EACH_NESTED_SUBJECT_END(subj)
72352+ FOR_EACH_SUBJECT_START(role, subj, x)
72353+ update_acl_obj_label(matchn->inode, matchn->device,
72354+ ino, dev, subj);
72355+ FOR_EACH_SUBJECT_END(subj,x)
72356+ FOR_EACH_ROLE_END(role)
72357+
72358+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
72359+
72360+ return;
72361+}
72362+
72363+static void
72364+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
72365+ const struct vfsmount *mnt)
72366+{
72367+ ino_t ino = dentry->d_inode->i_ino;
72368+ dev_t dev = __get_dev(dentry);
72369+
72370+ __do_handle_create(matchn, ino, dev);
72371+
72372+ return;
72373+}
72374+
72375+void
72376+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72377+{
72378+ struct name_entry *matchn;
72379+
72380+ if (unlikely(!(gr_status & GR_READY)))
72381+ return;
72382+
72383+ preempt_disable();
72384+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
72385+
72386+ if (unlikely((unsigned long)matchn)) {
72387+ write_lock(&gr_inode_lock);
72388+ do_handle_create(matchn, dentry, mnt);
72389+ write_unlock(&gr_inode_lock);
72390+ }
72391+ preempt_enable();
72392+
72393+ return;
72394+}
72395+
72396+void
72397+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72398+{
72399+ struct name_entry *matchn;
72400+
72401+ if (unlikely(!(gr_status & GR_READY)))
72402+ return;
72403+
72404+ preempt_disable();
72405+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
72406+
72407+ if (unlikely((unsigned long)matchn)) {
72408+ write_lock(&gr_inode_lock);
72409+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
72410+ write_unlock(&gr_inode_lock);
72411+ }
72412+ preempt_enable();
72413+
72414+ return;
72415+}
72416+
72417+void
72418+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72419+ struct dentry *old_dentry,
72420+ struct dentry *new_dentry,
72421+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
72422+{
72423+ struct name_entry *matchn;
72424+ struct name_entry *matchn2 = NULL;
72425+ struct inodev_entry *inodev;
72426+ struct inode *inode = new_dentry->d_inode;
72427+ ino_t old_ino = old_dentry->d_inode->i_ino;
72428+ dev_t old_dev = __get_dev(old_dentry);
72429+ unsigned int exchange = flags & RENAME_EXCHANGE;
72430+
72431+ /* vfs_rename swaps the name and parent link for old_dentry and
72432+ new_dentry
72433+ at this point, old_dentry has the new name, parent link, and inode
72434+ for the renamed file
72435+ if a file is being replaced by a rename, new_dentry has the inode
72436+ and name for the replaced file
72437+ */
72438+
72439+ if (unlikely(!(gr_status & GR_READY)))
72440+ return;
72441+
72442+ preempt_disable();
72443+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
72444+
72445+ /* exchange cases:
72446+ a filename exists for the source, but not dest
72447+ do a recreate on source
72448+ a filename exists for the dest, but not source
72449+ do a recreate on dest
72450+ a filename exists for both source and dest
72451+ delete source and dest, then create source and dest
72452+ a filename exists for neither source nor dest
72453+ no updates needed
72454+
72455+ the name entry lookups get us the old inode/dev associated with
72456+ each name, so do the deletes first (if possible) so that when
72457+ we do the create, we pick up on the right entries
72458+ */
72459+
72460+ if (exchange)
72461+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
72462+
72463+ /* we wouldn't have to check d_inode if it weren't for
72464+ NFS silly-renaming
72465+ */
72466+
72467+ write_lock(&gr_inode_lock);
72468+ if (unlikely((replace || exchange) && inode)) {
72469+ ino_t new_ino = inode->i_ino;
72470+ dev_t new_dev = __get_dev(new_dentry);
72471+
72472+ inodev = lookup_inodev_entry(new_ino, new_dev);
72473+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
72474+ do_handle_delete(inodev, new_ino, new_dev);
72475+ }
72476+
72477+ inodev = lookup_inodev_entry(old_ino, old_dev);
72478+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
72479+ do_handle_delete(inodev, old_ino, old_dev);
72480+
72481+ if (unlikely(matchn != NULL))
72482+ do_handle_create(matchn, old_dentry, mnt);
72483+
72484+ if (unlikely(matchn2 != NULL))
72485+ do_handle_create(matchn2, new_dentry, mnt);
72486+
72487+ write_unlock(&gr_inode_lock);
72488+ preempt_enable();
72489+
72490+ return;
72491+}
72492+
72493+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
72494+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
72495+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
72496+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
72497+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
72498+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
72499+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
72500+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
72501+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
72502+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
72503+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
72504+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
72505+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
72506+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
72507+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
72508+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
72509+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
72510+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
72511+};
72512+
72513+void
72514+gr_learn_resource(const struct task_struct *task,
72515+ const int res, const unsigned long wanted, const int gt)
72516+{
72517+ struct acl_subject_label *acl;
72518+ const struct cred *cred;
72519+
72520+ if (unlikely((gr_status & GR_READY) &&
72521+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
72522+ goto skip_reslog;
72523+
72524+ gr_log_resource(task, res, wanted, gt);
72525+skip_reslog:
72526+
72527+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
72528+ return;
72529+
72530+ acl = task->acl;
72531+
72532+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
72533+ !(acl->resmask & (1U << (unsigned short) res))))
72534+ return;
72535+
72536+ if (wanted >= acl->res[res].rlim_cur) {
72537+ unsigned long res_add;
72538+
72539+ res_add = wanted + res_learn_bumps[res];
72540+
72541+ acl->res[res].rlim_cur = res_add;
72542+
72543+ if (wanted > acl->res[res].rlim_max)
72544+ acl->res[res].rlim_max = res_add;
72545+
72546+ /* only log the subject filename, since resource logging is supported for
72547+ single-subject learning only */
72548+ rcu_read_lock();
72549+ cred = __task_cred(task);
72550+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72551+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
72552+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
72553+ "", (unsigned long) res, &task->signal->saved_ip);
72554+ rcu_read_unlock();
72555+ }
72556+
72557+ return;
72558+}
72559+EXPORT_SYMBOL_GPL(gr_learn_resource);
72560+#endif
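
The bump table above gives each resource some headroom: when a request reaches the learned soft limit, gr_learn_resource raises the limit to the request plus the per-resource bump, so gradual growth does not emit a learning log line for every small increment. A minimal sketch of that update rule (values illustrative):

#include <stdio.h>

struct learned_limit { unsigned long cur, max; };

/* mirrors the update in gr_learn_resource: raise the soft limit to
 * wanted + bump once wanted reaches it; track the hard limit too */
static void learn(struct learned_limit *l, unsigned long wanted, unsigned long bump)
{
	if (wanted >= l->cur) {
		l->cur = wanted + bump;
		if (wanted > l->max)
			l->max = wanted + bump;
	}
}

int main(void)
{
	struct learned_limit nofile = { 0, 0 };
	learn(&nofile, 1024, 64);   /* first observation: cur becomes 1088 */
	learn(&nofile, 1050, 64);   /* within the headroom: no change */
	printf("cur=%lu max=%lu\n", nofile.cur, nofile.max);
	return 0;
}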
72561+
72562+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
72563+void
72564+pax_set_initial_flags(struct linux_binprm *bprm)
72565+{
72566+ struct task_struct *task = current;
72567+ struct acl_subject_label *proc;
72568+ unsigned long flags;
72569+
72570+ if (unlikely(!(gr_status & GR_READY)))
72571+ return;
72572+
72573+ flags = pax_get_flags(task);
72574+
72575+ proc = task->acl;
72576+
72577+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
72578+ flags &= ~MF_PAX_PAGEEXEC;
72579+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
72580+ flags &= ~MF_PAX_SEGMEXEC;
72581+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
72582+ flags &= ~MF_PAX_RANDMMAP;
72583+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
72584+ flags &= ~MF_PAX_EMUTRAMP;
72585+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
72586+ flags &= ~MF_PAX_MPROTECT;
72587+
72588+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
72589+ flags |= MF_PAX_PAGEEXEC;
72590+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
72591+ flags |= MF_PAX_SEGMEXEC;
72592+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
72593+ flags |= MF_PAX_RANDMMAP;
72594+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
72595+ flags |= MF_PAX_EMUTRAMP;
72596+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
72597+ flags |= MF_PAX_MPROTECT;
72598+
72599+ pax_set_flags(task, flags);
72600+
72601+ return;
72602+}
72603+#endif
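
pax_set_initial_flags applies the subject's explicit overrides on top of whatever flags the binary already carries: DISABLE_* bits clear, ENABLE_* bits set, and any flag the policy does not mention passes through unchanged. A sketch of the idiom with illustrative flag names (not the kernel's MF_PAX_* values):

#include <stdio.h>

#define F_PAGEEXEC 0x1u
#define F_MPROTECT 0x2u

#define POL_DISABLE_PAGEEXEC 0x1u
#define POL_ENABLE_MPROTECT  0x2u

/* clear explicitly disabled bits, then set explicitly enabled ones;
 * flags the policy is silent about are left alone */
static unsigned int apply_policy(unsigned int flags, unsigned int pol)
{
	if (pol & POL_DISABLE_PAGEEXEC)
		flags &= ~F_PAGEEXEC;
	if (pol & POL_ENABLE_MPROTECT)
		flags |= F_MPROTECT;
	return flags;
}

int main(void)
{
	unsigned int f = apply_policy(F_PAGEEXEC,
				      POL_DISABLE_PAGEEXEC | POL_ENABLE_MPROTECT);
	printf("flags: %#x\n", f);   /* 0x2: PAGEEXEC cleared, MPROTECT set */
	return 0;
}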
72604+
72605+int
72606+gr_handle_proc_ptrace(struct task_struct *task)
72607+{
72608+ struct file *filp;
72609+ struct task_struct *tmp = task;
72610+ struct task_struct *curtemp = current;
72611+ __u32 retmode;
72612+
72613+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72614+ if (unlikely(!(gr_status & GR_READY)))
72615+ return 0;
72616+#endif
72617+
72618+ read_lock(&tasklist_lock);
72619+ read_lock(&grsec_exec_file_lock);
72620+ filp = task->exec_file;
72621+
72622+ while (task_pid_nr(tmp) > 0) {
72623+ if (tmp == curtemp)
72624+ break;
72625+ tmp = tmp->real_parent;
72626+ }
72627+
72628+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72629+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
72630+ read_unlock(&grsec_exec_file_lock);
72631+ read_unlock(&tasklist_lock);
72632+ return 1;
72633+ }
72634+
72635+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72636+ if (!(gr_status & GR_READY)) {
72637+ read_unlock(&grsec_exec_file_lock);
72638+ read_unlock(&tasklist_lock);
72639+ return 0;
72640+ }
72641+#endif
72642+
72643+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
72644+ read_unlock(&grsec_exec_file_lock);
72645+ read_unlock(&tasklist_lock);
72646+
72647+ if (retmode & GR_NOPTRACE)
72648+ return 1;
72649+
72650+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
72651+ && (current->acl != task->acl || (current->acl != current->role->root_label
72652+ && task_pid_nr(current) != task_pid_nr(task))))
72653+ return 1;
72654+
72655+ return 0;
72656+}
72657+
72658+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
72659+{
72660+ if (unlikely(!(gr_status & GR_READY)))
72661+ return;
72662+
72663+ if (!(current->role->roletype & GR_ROLE_GOD))
72664+ return;
72665+
72666+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
72667+ p->role->rolename, gr_task_roletype_to_char(p),
72668+ p->acl->filename);
72669+}
72670+
72671+int
72672+gr_handle_ptrace(struct task_struct *task, const long request)
72673+{
72674+ struct task_struct *tmp = task;
72675+ struct task_struct *curtemp = current;
72676+ __u32 retmode;
72677+
72678+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72679+ if (unlikely(!(gr_status & GR_READY)))
72680+ return 0;
72681+#endif
72682+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
72683+ read_lock(&tasklist_lock);
72684+ while (task_pid_nr(tmp) > 0) {
72685+ if (tmp == curtemp)
72686+ break;
72687+ tmp = tmp->real_parent;
72688+ }
72689+
72690+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72691+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
72692+ read_unlock(&tasklist_lock);
72693+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72694+ return 1;
72695+ }
72696+ read_unlock(&tasklist_lock);
72697+ }
72698+
72699+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72700+ if (!(gr_status & GR_READY))
72701+ return 0;
72702+#endif
72703+
72704+ read_lock(&grsec_exec_file_lock);
72705+ if (unlikely(!task->exec_file)) {
72706+ read_unlock(&grsec_exec_file_lock);
72707+ return 0;
72708+ }
72709+
72710+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
72711+ read_unlock(&grsec_exec_file_lock);
72712+
72713+ if (retmode & GR_NOPTRACE) {
72714+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72715+ return 1;
72716+ }
72717+
72718+ if (retmode & GR_PTRACERD) {
72719+ switch (request) {
72720+ case PTRACE_SEIZE:
72721+ case PTRACE_POKETEXT:
72722+ case PTRACE_POKEDATA:
72723+ case PTRACE_POKEUSR:
72724+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
72725+ case PTRACE_SETREGS:
72726+ case PTRACE_SETFPREGS:
72727+#endif
72728+#ifdef CONFIG_X86
72729+ case PTRACE_SETFPXREGS:
72730+#endif
72731+#ifdef CONFIG_ALTIVEC
72732+ case PTRACE_SETVRREGS:
72733+#endif
72734+ return 1;
72735+ default:
72736+ return 0;
72737+ }
72738+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
72739+ !(current->role->roletype & GR_ROLE_GOD) &&
72740+ (current->acl != task->acl)) {
72741+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72742+ return 1;
72743+ }
72744+
72745+ return 0;
72746+}
72747+
72748+static int is_writable_mmap(const struct file *filp)
72749+{
72750+ struct task_struct *task = current;
72751+ struct acl_object_label *obj, *obj2;
72752+
72753+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
72754+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
72755+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72756+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
72757+ task->role->root_label);
72758+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
72759+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
72760+ return 1;
72761+ }
72762+ }
72763+ return 0;
72764+}
72765+
72766+int
72767+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
72768+{
72769+ __u32 mode;
72770+
72771+ if (unlikely(!file || !(prot & PROT_EXEC)))
72772+ return 1;
72773+
72774+ if (is_writable_mmap(file))
72775+ return 0;
72776+
72777+ mode =
72778+ gr_search_file(file->f_path.dentry,
72779+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72780+ file->f_path.mnt);
72781+
72782+ if (!gr_tpe_allow(file))
72783+ return 0;
72784+
72785+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72786+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72787+ return 0;
72788+ } else if (unlikely(!(mode & GR_EXEC))) {
72789+ return 0;
72790+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72791+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72792+ return 1;
72793+ }
72794+
72795+ return 1;
72796+}
72797+
72798+int
72799+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72800+{
72801+ __u32 mode;
72802+
72803+ if (unlikely(!file || !(prot & PROT_EXEC)))
72804+ return 1;
72805+
72806+ if (is_writable_mmap(file))
72807+ return 0;
72808+
72809+ mode =
72810+ gr_search_file(file->f_path.dentry,
72811+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72812+ file->f_path.mnt);
72813+
72814+ if (!gr_tpe_allow(file))
72815+ return 0;
72816+
72817+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72818+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72819+ return 0;
72820+ } else if (unlikely(!(mode & GR_EXEC))) {
72821+ return 0;
72822+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72823+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72824+ return 1;
72825+ }
72826+
72827+ return 1;
72828+}
72829+
72830+void
72831+gr_acl_handle_psacct(struct task_struct *task, const long code)
72832+{
72833+ unsigned long runtime, cputime;
72834+ cputime_t utime, stime;
72835+ unsigned int wday, cday;
72836+ __u8 whr, chr;
72837+ __u8 wmin, cmin;
72838+ __u8 wsec, csec;
72839+ struct timespec timeval;
72840+
72841+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
72842+ !(task->acl->mode & GR_PROCACCT)))
72843+ return;
72844+
72845+ do_posix_clock_monotonic_gettime(&timeval);
72846+ runtime = timeval.tv_sec - task->start_time.tv_sec;
72847+ wday = runtime / (60 * 60 * 24);
72848+ runtime -= wday * (60 * 60 * 24);
72849+ whr = runtime / (60 * 60);
72850+ runtime -= whr * (60 * 60);
72851+ wmin = runtime / 60;
72852+ runtime -= wmin * 60;
72853+ wsec = runtime;
72854+
72855+ task_cputime(task, &utime, &stime);
72856+ cputime = cputime_to_secs(utime + stime);
72857+ cday = cputime / (60 * 60 * 24);
72858+ cputime -= cday * (60 * 60 * 24);
72859+ chr = cputime / (60 * 60);
72860+ cputime -= chr * (60 * 60);
72861+ cmin = cputime / 60;
72862+ cputime -= cmin * 60;
72863+ csec = cputime;
72864+
72865+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
72866+
72867+ return;
72868+}
72869+
72870+#ifdef CONFIG_TASKSTATS
72871+int gr_is_taskstats_denied(int pid)
72872+{
72873+ struct task_struct *task;
72874+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72875+ const struct cred *cred;
72876+#endif
72877+ int ret = 0;
72878+
72879+ /* restrict taskstats viewing to un-chrooted root users
72880+ who have the 'view' subject flag if the RBAC system is enabled
72881+ */
72882+
72883+ rcu_read_lock();
72884+ read_lock(&tasklist_lock);
72885+ task = find_task_by_vpid(pid);
72886+ if (task) {
72887+#ifdef CONFIG_GRKERNSEC_CHROOT
72888+ if (proc_is_chrooted(task))
72889+ ret = -EACCES;
72890+#endif
72891+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72892+ cred = __task_cred(task);
72893+#ifdef CONFIG_GRKERNSEC_PROC_USER
72894+ if (gr_is_global_nonroot(cred->uid))
72895+ ret = -EACCES;
72896+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72897+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
72898+ ret = -EACCES;
72899+#endif
72900+#endif
72901+ if (gr_status & GR_READY) {
72902+ if (!(task->acl->mode & GR_VIEW))
72903+ ret = -EACCES;
72904+ }
72905+ } else
72906+ ret = -ENOENT;
72907+
72908+ read_unlock(&tasklist_lock);
72909+ rcu_read_unlock();
72910+
72911+ return ret;
72912+}
72913+#endif
72914+
72915+/* AUXV entries are filled via a descendant of search_binary_handler
72916+ after we've already applied the subject for the target
72917+*/
72918+int gr_acl_enable_at_secure(void)
72919+{
72920+ if (unlikely(!(gr_status & GR_READY)))
72921+ return 0;
72922+
72923+ if (current->acl->mode & GR_ATSECURE)
72924+ return 1;
72925+
72926+ return 0;
72927+}
72928+
72929+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
72930+{
72931+ struct task_struct *task = current;
72932+ struct dentry *dentry = file->f_path.dentry;
72933+ struct vfsmount *mnt = file->f_path.mnt;
72934+ struct acl_object_label *obj, *tmp;
72935+ struct acl_subject_label *subj;
72936+ unsigned int bufsize;
72937+ int is_not_root;
72938+ char *path;
72939+ dev_t dev = __get_dev(dentry);
72940+
72941+ if (unlikely(!(gr_status & GR_READY)))
72942+ return 1;
72943+
72944+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72945+ return 1;
72946+
72947+ /* ignore Eric Biederman */
72948+ if (IS_PRIVATE(dentry->d_inode))
72949+ return 1;
72950+
72951+ subj = task->acl;
72952+ read_lock(&gr_inode_lock);
72953+ do {
72954+ obj = lookup_acl_obj_label(ino, dev, subj);
72955+ if (obj != NULL) {
72956+ read_unlock(&gr_inode_lock);
72957+ return (obj->mode & GR_FIND) ? 1 : 0;
72958+ }
72959+ } while ((subj = subj->parent_subject));
72960+ read_unlock(&gr_inode_lock);
72961+
72962+ /* this is purely an optimization since we're looking for an object
72963+ for the directory we're doing a readdir on
72964+ if it's possible for any globbed object to match the entry we're
72965+ filling into the directory, then the object we find here will be
72966+ an anchor point with attached globbed objects
72967+ */
72968+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72969+ if (obj->globbed == NULL)
72970+ return (obj->mode & GR_FIND) ? 1 : 0;
72971+
72972+ is_not_root = ((obj->filename[0] == '/') &&
72973+ (obj->filename[1] == '\0')) ? 0 : 1;
72974+ bufsize = PAGE_SIZE - namelen - is_not_root;
72975+
72976+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
72977+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72978+ return 1;
72979+
72980+ preempt_disable();
72981+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72982+ bufsize);
72983+
72984+ bufsize = strlen(path);
72985+
72986+ /* if base is "/", don't append an additional slash */
72987+ if (is_not_root)
72988+ *(path + bufsize) = '/';
72989+ memcpy(path + bufsize + is_not_root, name, namelen);
72990+ *(path + bufsize + namelen + is_not_root) = '\0';
72991+
72992+ tmp = obj->globbed;
72993+ while (tmp) {
72994+ if (!glob_match(tmp->filename, path)) {
72995+ preempt_enable();
72996+ return (tmp->mode & GR_FIND) ? 1 : 0;
72997+ }
72998+ tmp = tmp->next;
72999+ }
73000+ preempt_enable();
73001+ return (obj->mode & GR_FIND) ? 1 : 0;
73002+}
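
The range check near the top of gr_acl_handle_filldir, (bufsize - 1) > (PAGE_SIZE - 1), folds the two tests its comment names into a single unsigned comparison: bufsize == 0 wraps around to UINT_MAX, and anything above PAGE_SIZE also exceeds PAGE_SIZE - 1. A standalone sketch of the trick:

#include <stdio.h>

#define LIMIT 4096u   /* stands in for PAGE_SIZE */

/* true exactly when x == 0 (unsigned wrap) or x > LIMIT */
static int out_of_range(unsigned int x)
{
	return (x - 1) > (LIMIT - 1);
}

int main(void)
{
	printf("0:%d 1:%d 4096:%d 4097:%d\n",
	       out_of_range(0), out_of_range(1),
	       out_of_range(4096), out_of_range(4097));   /* 1 0 0 1 */
	return 0;
}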
73003+
73004+void gr_put_exec_file(struct task_struct *task)
73005+{
73006+ struct file *filp;
73007+
73008+ write_lock(&grsec_exec_file_lock);
73009+ filp = task->exec_file;
73010+ task->exec_file = NULL;
73011+ write_unlock(&grsec_exec_file_lock);
73012+
73013+ if (filp)
73014+ fput(filp);
73015+
73016+ return;
73017+}
73018+
73019+
73020+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
73021+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
73022+#endif
73023+#ifdef CONFIG_SECURITY
73024+EXPORT_SYMBOL_GPL(gr_check_user_change);
73025+EXPORT_SYMBOL_GPL(gr_check_group_change);
73026+#endif
73027+
73028diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
73029new file mode 100644
73030index 0000000..18ffbbd
73031--- /dev/null
73032+++ b/grsecurity/gracl_alloc.c
73033@@ -0,0 +1,105 @@
73034+#include <linux/kernel.h>
73035+#include <linux/mm.h>
73036+#include <linux/slab.h>
73037+#include <linux/vmalloc.h>
73038+#include <linux/gracl.h>
73039+#include <linux/grsecurity.h>
73040+
73041+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
73042+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
73043+
73044+static __inline__ int
73045+alloc_pop(void)
73046+{
73047+ if (current_alloc_state->alloc_stack_next == 1)
73048+ return 0;
73049+
73050+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
73051+
73052+ current_alloc_state->alloc_stack_next--;
73053+
73054+ return 1;
73055+}
73056+
73057+static __inline__ int
73058+alloc_push(void *buf)
73059+{
73060+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
73061+ return 1;
73062+
73063+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
73064+
73065+ current_alloc_state->alloc_stack_next++;
73066+
73067+ return 0;
73068+}
73069+
73070+void *
73071+acl_alloc(unsigned long len)
73072+{
73073+ void *ret = NULL;
73074+
73075+ if (!len || len > PAGE_SIZE)
73076+ goto out;
73077+
73078+ ret = kmalloc(len, GFP_KERNEL);
73079+
73080+ if (ret) {
73081+ if (alloc_push(ret)) {
73082+ kfree(ret);
73083+ ret = NULL;
73084+ }
73085+ }
73086+
73087+out:
73088+ return ret;
73089+}
73090+
73091+void *
73092+acl_alloc_num(unsigned long num, unsigned long len)
73093+{
73094+ if (!len || (num > (PAGE_SIZE / len)))
73095+ return NULL;
73096+
73097+ return acl_alloc(num * len);
73098+}
73099+
73100+void
73101+acl_free_all(void)
73102+{
73103+ if (!current_alloc_state->alloc_stack)
73104+ return;
73105+
73106+ while (alloc_pop()) ;
73107+
73108+ if (current_alloc_state->alloc_stack) {
73109+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
73110+ kfree(current_alloc_state->alloc_stack);
73111+ else
73112+ vfree(current_alloc_state->alloc_stack);
73113+ }
73114+
73115+ current_alloc_state->alloc_stack = NULL;
73116+ current_alloc_state->alloc_stack_size = 1;
73117+ current_alloc_state->alloc_stack_next = 1;
73118+
73119+ return;
73120+}
73121+
73122+int
73123+acl_alloc_stack_init(unsigned long size)
73124+{
73125+ if ((size * sizeof (void *)) <= PAGE_SIZE)
73126+ current_alloc_state->alloc_stack =
73127+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
73128+ else
73129+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
73130+
73131+ current_alloc_state->alloc_stack_size = size;
73132+ current_alloc_state->alloc_stack_next = 1;
73133+
73134+ if (!current_alloc_state->alloc_stack)
73135+ return 0;
73136+ else
73137+ return 1;
73138+}
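
acl_alloc and friends implement a simple arena: every allocation is pushed onto a pointer stack so acl_free_all can release a whole policy in one pass, with the stack itself kmalloc'd or vmalloc'd depending on its size. A userspace sketch of the same pattern, assuming plain malloc/free in place of the kernel allocators (note the push guard, like the one above, leaves the final stack slot unused):

#include <stdio.h>
#include <stdlib.h>

static void **stack;
static unsigned long stack_size, stack_next;   /* 1-based, as above */

static int arena_init(unsigned long size)
{
	stack = malloc(size * sizeof(void *));
	stack_size = size;
	stack_next = 1;
	return stack != NULL;
}

static void *arena_alloc(size_t len)
{
	void *p;

	if (stack_next >= stack_size)   /* same guard as alloc_push */
		return NULL;
	p = malloc(len);
	if (p)
		stack[stack_next++ - 1] = p;
	return p;
}

static void arena_free_all(void)
{
	while (stack_next > 1)
		free(stack[--stack_next - 1]);
	free(stack);
	stack = NULL;
}

int main(void)
{
	if (!arena_init(16))
		return 1;
	void *a = arena_alloc(32), *b = arena_alloc(64);
	printf("allocated %p and %p; freeing in one pass\n", a, b);
	arena_free_all();
	return 0;
}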
73139diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
73140new file mode 100644
73141index 0000000..1a94c11
73142--- /dev/null
73143+++ b/grsecurity/gracl_cap.c
73144@@ -0,0 +1,127 @@
73145+#include <linux/kernel.h>
73146+#include <linux/module.h>
73147+#include <linux/sched.h>
73148+#include <linux/gracl.h>
73149+#include <linux/grsecurity.h>
73150+#include <linux/grinternal.h>
73151+
73152+extern const char *captab_log[];
73153+extern int captab_log_entries;
73154+
73155+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
73156+{
73157+ struct acl_subject_label *curracl;
73158+
73159+ if (!gr_acl_is_enabled())
73160+ return 1;
73161+
73162+ curracl = task->acl;
73163+
73164+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
73165+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73166+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
73167+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
73168+ gr_to_filename(task->exec_file->f_path.dentry,
73169+ task->exec_file->f_path.mnt) : curracl->filename,
73170+ curracl->filename, 0UL,
73171+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
73172+ return 1;
73173+ }
73174+
73175+ return 0;
73176+}
73177+
73178+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73179+{
73180+ struct acl_subject_label *curracl;
73181+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73182+ kernel_cap_t cap_audit = __cap_empty_set;
73183+
73184+ if (!gr_acl_is_enabled())
73185+ return 1;
73186+
73187+ curracl = task->acl;
73188+
73189+ cap_drop = curracl->cap_lower;
73190+ cap_mask = curracl->cap_mask;
73191+ cap_audit = curracl->cap_invert_audit;
73192+
73193+ while ((curracl = curracl->parent_subject)) {
73194+ /* if the cap isn't specified in the current computed mask but is specified in the
73195+ current level subject, and is lowered in the current level subject, then add
73196+	   it to the set of dropped capabilities;
73197+	   otherwise, add the current level subject's mask to the current computed mask
73198+ */
73199+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73200+ cap_raise(cap_mask, cap);
73201+ if (cap_raised(curracl->cap_lower, cap))
73202+ cap_raise(cap_drop, cap);
73203+ if (cap_raised(curracl->cap_invert_audit, cap))
73204+ cap_raise(cap_audit, cap);
73205+ }
73206+ }
73207+
73208+ if (!cap_raised(cap_drop, cap)) {
73209+ if (cap_raised(cap_audit, cap))
73210+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
73211+ return 1;
73212+ }
73213+
73214+ /* only learn the capability use if the process has the capability in the
73215+ general case, the two uses in sys.c of gr_learn_cap are an exception
73216+ to this rule to ensure any role transition involves what the full-learned
73217+ policy believes in a privileged process
73218+ */
73219+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
73220+ return 1;
73221+
73222+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
73223+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
73224+
73225+ return 0;
73226+}
73227+
73228+int
73229+gr_acl_is_capable(const int cap)
73230+{
73231+ return gr_task_acl_is_capable(current, current_cred(), cap);
73232+}
73233+
73234+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
73235+{
73236+ struct acl_subject_label *curracl;
73237+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73238+
73239+ if (!gr_acl_is_enabled())
73240+ return 1;
73241+
73242+ curracl = task->acl;
73243+
73244+ cap_drop = curracl->cap_lower;
73245+ cap_mask = curracl->cap_mask;
73246+
73247+ while ((curracl = curracl->parent_subject)) {
73248+ /* if the cap isn't specified in the current computed mask but is specified in the
73249+ current level subject, and is lowered in the current level subject, then add
73250+	   it to the set of dropped capabilities;
73251+	   otherwise, add the current level subject's mask to the current computed mask
73252+ */
73253+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73254+ cap_raise(cap_mask, cap);
73255+ if (cap_raised(curracl->cap_lower, cap))
73256+ cap_raise(cap_drop, cap);
73257+ }
73258+ }
73259+
73260+ if (!cap_raised(cap_drop, cap))
73261+ return 1;
73262+
73263+ return 0;
73264+}
73265+
73266+int
73267+gr_acl_is_capable_nolog(const int cap)
73268+{
73269+ return gr_task_acl_is_capable_nolog(current, cap);
73270+}
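
Both capability walks apply the same inheritance rule: the task's own subject seeds the computed mask and drop set, and each ancestor only fills in capabilities that no nearer subject has already specified in its cap_mask. A userspace sketch with a single 64-bit word standing in for kernel_cap_t (an assumption made for brevity):

#include <stdint.h>
#include <stdio.h>

struct subject {
	uint64_t cap_mask;    /* capabilities this subject specifies */
	uint64_t cap_lower;   /* capabilities this subject lowers    */
	struct subject *parent;
};

/* returns nonzero if cap is dropped for the given subject chain */
static int cap_dropped(const struct subject *s, int cap)
{
	uint64_t bit = 1ULL << cap;
	uint64_t mask = s->cap_mask, drop = s->cap_lower;

	for (s = s->parent; s; s = s->parent)
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;
			if (s->cap_lower & bit)
				drop |= bit;
		}
	return !!(drop & bit);
}

int main(void)
{
	struct subject root  = { 1ULL << 3, 1ULL << 3, NULL };
	struct subject child = { 0, 0, &root };

	/* cap 3: unspecified in the child, specified and lowered in root */
	printf("cap 3 dropped: %d\n", cap_dropped(&child, 3));
	return 0;
}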
73271+
73272diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
73273new file mode 100644
73274index 0000000..ca25605
73275--- /dev/null
73276+++ b/grsecurity/gracl_compat.c
73277@@ -0,0 +1,270 @@
73278+#include <linux/kernel.h>
73279+#include <linux/gracl.h>
73280+#include <linux/compat.h>
73281+#include <linux/gracl_compat.h>
73282+
73283+#include <asm/uaccess.h>
73284+
73285+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
73286+{
73287+ struct gr_arg_wrapper_compat uwrapcompat;
73288+
73289+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
73290+ return -EFAULT;
73291+
73292+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
73293+ (uwrapcompat.version != 0x2901)) ||
73294+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
73295+ return -EINVAL;
73296+
73297+ uwrap->arg = compat_ptr(uwrapcompat.arg);
73298+ uwrap->version = uwrapcompat.version;
73299+ uwrap->size = sizeof(struct gr_arg);
73300+
73301+ return 0;
73302+}
73303+
73304+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
73305+{
73306+ struct gr_arg_compat argcompat;
73307+
73308+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
73309+ return -EFAULT;
73310+
73311+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
73312+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
73313+ arg->role_db.num_roles = argcompat.role_db.num_roles;
73314+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
73315+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
73316+ arg->role_db.num_objects = argcompat.role_db.num_objects;
73317+
73318+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
73319+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
73320+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
73321+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
73322+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
73323+ arg->segv_device = argcompat.segv_device;
73324+ arg->segv_inode = argcompat.segv_inode;
73325+ arg->segv_uid = argcompat.segv_uid;
73326+ arg->num_sprole_pws = argcompat.num_sprole_pws;
73327+ arg->mode = argcompat.mode;
73328+
73329+ return 0;
73330+}
73331+
73332+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
73333+{
73334+ struct acl_object_label_compat objcompat;
73335+
73336+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
73337+ return -EFAULT;
73338+
73339+ obj->filename = compat_ptr(objcompat.filename);
73340+ obj->inode = objcompat.inode;
73341+ obj->device = objcompat.device;
73342+ obj->mode = objcompat.mode;
73343+
73344+ obj->nested = compat_ptr(objcompat.nested);
73345+ obj->globbed = compat_ptr(objcompat.globbed);
73346+
73347+ obj->prev = compat_ptr(objcompat.prev);
73348+ obj->next = compat_ptr(objcompat.next);
73349+
73350+ return 0;
73351+}
73352+
73353+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73354+{
73355+ unsigned int i;
73356+ struct acl_subject_label_compat subjcompat;
73357+
73358+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
73359+ return -EFAULT;
73360+
73361+ subj->filename = compat_ptr(subjcompat.filename);
73362+ subj->inode = subjcompat.inode;
73363+ subj->device = subjcompat.device;
73364+ subj->mode = subjcompat.mode;
73365+ subj->cap_mask = subjcompat.cap_mask;
73366+ subj->cap_lower = subjcompat.cap_lower;
73367+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
73368+
73369+ for (i = 0; i < GR_NLIMITS; i++) {
73370+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
73371+ subj->res[i].rlim_cur = RLIM_INFINITY;
73372+ else
73373+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
73374+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
73375+ subj->res[i].rlim_max = RLIM_INFINITY;
73376+ else
73377+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
73378+ }
73379+ subj->resmask = subjcompat.resmask;
73380+
73381+ subj->user_trans_type = subjcompat.user_trans_type;
73382+ subj->group_trans_type = subjcompat.group_trans_type;
73383+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
73384+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
73385+ subj->user_trans_num = subjcompat.user_trans_num;
73386+ subj->group_trans_num = subjcompat.group_trans_num;
73387+
73388+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
73389+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
73390+ subj->ip_type = subjcompat.ip_type;
73391+ subj->ips = compat_ptr(subjcompat.ips);
73392+ subj->ip_num = subjcompat.ip_num;
73393+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
73394+
73395+ subj->crashes = subjcompat.crashes;
73396+ subj->expires = subjcompat.expires;
73397+
73398+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
73399+ subj->hash = compat_ptr(subjcompat.hash);
73400+ subj->prev = compat_ptr(subjcompat.prev);
73401+ subj->next = compat_ptr(subjcompat.next);
73402+
73403+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
73404+ subj->obj_hash_size = subjcompat.obj_hash_size;
73405+ subj->pax_flags = subjcompat.pax_flags;
73406+
73407+ return 0;
73408+}
73409+
73410+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
73411+{
73412+ struct acl_role_label_compat rolecompat;
73413+
73414+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
73415+ return -EFAULT;
73416+
73417+ role->rolename = compat_ptr(rolecompat.rolename);
73418+ role->uidgid = rolecompat.uidgid;
73419+ role->roletype = rolecompat.roletype;
73420+
73421+ role->auth_attempts = rolecompat.auth_attempts;
73422+ role->expires = rolecompat.expires;
73423+
73424+ role->root_label = compat_ptr(rolecompat.root_label);
73425+ role->hash = compat_ptr(rolecompat.hash);
73426+
73427+ role->prev = compat_ptr(rolecompat.prev);
73428+ role->next = compat_ptr(rolecompat.next);
73429+
73430+ role->transitions = compat_ptr(rolecompat.transitions);
73431+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
73432+ role->domain_children = compat_ptr(rolecompat.domain_children);
73433+ role->domain_child_num = rolecompat.domain_child_num;
73434+
73435+ role->umask = rolecompat.umask;
73436+
73437+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
73438+ role->subj_hash_size = rolecompat.subj_hash_size;
73439+
73440+ return 0;
73441+}
73442+
73443+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73444+{
73445+ struct role_allowed_ip_compat roleip_compat;
73446+
73447+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
73448+ return -EFAULT;
73449+
73450+ roleip->addr = roleip_compat.addr;
73451+ roleip->netmask = roleip_compat.netmask;
73452+
73453+ roleip->prev = compat_ptr(roleip_compat.prev);
73454+ roleip->next = compat_ptr(roleip_compat.next);
73455+
73456+ return 0;
73457+}
73458+
73459+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
73460+{
73461+ struct role_transition_compat trans_compat;
73462+
73463+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
73464+ return -EFAULT;
73465+
73466+ trans->rolename = compat_ptr(trans_compat.rolename);
73467+
73468+ trans->prev = compat_ptr(trans_compat.prev);
73469+ trans->next = compat_ptr(trans_compat.next);
73470+
73471+ return 0;
73473+}
73474+
73475+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73476+{
73477+ struct gr_hash_struct_compat hash_compat;
73478+
73479+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
73480+ return -EFAULT;
73481+
73482+ hash->table = compat_ptr(hash_compat.table);
73483+ hash->nametable = compat_ptr(hash_compat.nametable);
73484+ hash->first = compat_ptr(hash_compat.first);
73485+
73486+ hash->table_size = hash_compat.table_size;
73487+ hash->used_size = hash_compat.used_size;
73488+
73489+ hash->type = hash_compat.type;
73490+
73491+ return 0;
73492+}
73493+
73494+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
73495+{
73496+ compat_uptr_t ptrcompat;
73497+
73498+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
73499+ return -EFAULT;
73500+
73501+ *(void **)ptr = compat_ptr(ptrcompat);
73502+
73503+ return 0;
73504+}
73505+
73506+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73507+{
73508+ struct acl_ip_label_compat ip_compat;
73509+
73510+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
73511+ return -EFAULT;
73512+
73513+ ip->iface = compat_ptr(ip_compat.iface);
73514+ ip->addr = ip_compat.addr;
73515+ ip->netmask = ip_compat.netmask;
73516+ ip->low = ip_compat.low;
73517+ ip->high = ip_compat.high;
73518+ ip->mode = ip_compat.mode;
73519+ ip->type = ip_compat.type;
73520+
73521+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
73522+
73523+ ip->prev = compat_ptr(ip_compat.prev);
73524+ ip->next = compat_ptr(ip_compat.next);
73525+
73526+ return 0;
73527+}
73528+
73529+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73530+{
73531+ struct sprole_pw_compat pw_compat;
73532+
73533+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
73534+ return -EFAULT;
73535+
73536+ pw->rolename = compat_ptr(pw_compat.rolename);
73537+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
73538+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
73539+
73540+ return 0;
73541+}
73542+
73543+size_t get_gr_arg_wrapper_size_compat(void)
73544+{
73545+ return sizeof(struct gr_arg_wrapper_compat);
73546+}
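
Each copy_*_compat helper above follows the same shape: copy the packed 32-bit layout from userland in one go, then widen it field by field into the native structure, converting compat_uptr_t values (32-bit integers carrying user pointers) back into pointers with compat_ptr. A userspace sketch of that shape, with illustrative struct layouts:

#include <stdint.h>
#include <stdio.h>

struct obj32 { uint32_t filename; uint32_t inode; uint32_t mode; };
struct obj   { void *filename; uint64_t inode; uint32_t mode; };

/* stand-in for compat_ptr: widen a 32-bit user pointer */
static void *compat_ptr_sketch(uint32_t p)
{
	return (void *)(uintptr_t)p;
}

static void widen(struct obj *dst, const struct obj32 *src)
{
	dst->filename = compat_ptr_sketch(src->filename);
	dst->inode = src->inode;      /* zero-extends 32 -> 64 bits */
	dst->mode = src->mode;
}

int main(void)
{
	struct obj32 in = { 0x1000, 42, 0644 };
	struct obj out;

	widen(&out, &in);
	printf("inode=%llu mode=%o ptr=%p\n",
	       (unsigned long long)out.inode, (unsigned)out.mode, out.filename);
	return 0;
}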
73547+
73548diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
73549new file mode 100644
73550index 0000000..4008fdc
73551--- /dev/null
73552+++ b/grsecurity/gracl_fs.c
73553@@ -0,0 +1,445 @@
73554+#include <linux/kernel.h>
73555+#include <linux/sched.h>
73556+#include <linux/types.h>
73557+#include <linux/fs.h>
73558+#include <linux/file.h>
73559+#include <linux/stat.h>
73560+#include <linux/grsecurity.h>
73561+#include <linux/grinternal.h>
73562+#include <linux/gracl.h>
73563+
73564+umode_t
73565+gr_acl_umask(void)
73566+{
73567+ if (unlikely(!gr_acl_is_enabled()))
73568+ return 0;
73569+
73570+ return current->role->umask;
73571+}
73572+
73573+__u32
73574+gr_acl_handle_hidden_file(const struct dentry * dentry,
73575+ const struct vfsmount * mnt)
73576+{
73577+ __u32 mode;
73578+
73579+ if (unlikely(d_is_negative(dentry)))
73580+ return GR_FIND;
73581+
73582+ mode =
73583+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
73584+
73585+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
73586+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73587+ return mode;
73588+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
73589+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73590+ return 0;
73591+ } else if (unlikely(!(mode & GR_FIND)))
73592+ return 0;
73593+
73594+ return GR_FIND;
73595+}
73596+
73597+__u32
73598+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
73599+ int acc_mode)
73600+{
73601+ __u32 reqmode = GR_FIND;
73602+ __u32 mode;
73603+
73604+ if (unlikely(d_is_negative(dentry)))
73605+ return reqmode;
73606+
73607+ if (acc_mode & MAY_APPEND)
73608+ reqmode |= GR_APPEND;
73609+ else if (acc_mode & MAY_WRITE)
73610+ reqmode |= GR_WRITE;
73611+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
73612+ reqmode |= GR_READ;
73613+
73614+ mode =
73615+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73616+ mnt);
73617+
73618+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73619+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73620+ reqmode & GR_READ ? " reading" : "",
73621+ reqmode & GR_WRITE ? " writing" : reqmode &
73622+ GR_APPEND ? " appending" : "");
73623+ return reqmode;
73624+ } else
73625+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73626+ {
73627+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73628+ reqmode & GR_READ ? " reading" : "",
73629+ reqmode & GR_WRITE ? " writing" : reqmode &
73630+ GR_APPEND ? " appending" : "");
73631+ return 0;
73632+ } else if (unlikely((mode & reqmode) != reqmode))
73633+ return 0;
73634+
73635+ return reqmode;
73636+}
73637+
73638+__u32
73639+gr_acl_handle_creat(const struct dentry * dentry,
73640+ const struct dentry * p_dentry,
73641+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
73642+ const int imode)
73643+{
73644+ __u32 reqmode = GR_WRITE | GR_CREATE;
73645+ __u32 mode;
73646+
73647+ if (acc_mode & MAY_APPEND)
73648+ reqmode |= GR_APPEND;
73649+ // if a directory was required or the directory already exists, then
73650+ // don't count this open as a read
73651+ if ((acc_mode & MAY_READ) &&
73652+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
73653+ reqmode |= GR_READ;
73654+ if ((open_flags & O_CREAT) &&
73655+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73656+ reqmode |= GR_SETID;
73657+
73658+ mode =
73659+ gr_check_create(dentry, p_dentry, p_mnt,
73660+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73661+
73662+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73663+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73664+ reqmode & GR_READ ? " reading" : "",
73665+ reqmode & GR_WRITE ? " writing" : reqmode &
73666+ GR_APPEND ? " appending" : "");
73667+ return reqmode;
73668+ } else
73669+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73670+ {
73671+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73672+ reqmode & GR_READ ? " reading" : "",
73673+ reqmode & GR_WRITE ? " writing" : reqmode &
73674+ GR_APPEND ? " appending" : "");
73675+ return 0;
73676+ } else if (unlikely((mode & reqmode) != reqmode))
73677+ return 0;
73678+
73679+ return reqmode;
73680+}
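
The setid test used here (and again in gr_acl_handle_chmod and gr_acl_handle_mknod below) counts setuid unconditionally, but setgid only together with group-execute, since a setgid bit without group-execute historically marks mandatory locking rather than privilege. A standalone sketch:

#include <stdio.h>
#include <sys/stat.h>

/* setuid always counts; setgid only with group-execute also set */
static int is_setid(mode_t m)
{
	return (m & S_ISUID) ||
	       ((m & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("04755:%d 02755:%d 02644:%d\n",
	       is_setid(04755), is_setid(02755), is_setid(02644));   /* 1 1 0 */
	return 0;
}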
73681+
73682+__u32
73683+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
73684+ const int fmode)
73685+{
73686+ __u32 mode, reqmode = GR_FIND;
73687+
73688+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
73689+ reqmode |= GR_EXEC;
73690+ if (fmode & S_IWOTH)
73691+ reqmode |= GR_WRITE;
73692+ if (fmode & S_IROTH)
73693+ reqmode |= GR_READ;
73694+
73695+ mode =
73696+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73697+ mnt);
73698+
73699+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73700+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73701+ reqmode & GR_READ ? " reading" : "",
73702+ reqmode & GR_WRITE ? " writing" : "",
73703+ reqmode & GR_EXEC ? " executing" : "");
73704+ return reqmode;
73705+ } else
73706+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73707+ {
73708+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73709+ reqmode & GR_READ ? " reading" : "",
73710+ reqmode & GR_WRITE ? " writing" : "",
73711+ reqmode & GR_EXEC ? " executing" : "");
73712+ return 0;
73713+ } else if (unlikely((mode & reqmode) != reqmode))
73714+ return 0;
73715+
73716+ return reqmode;
73717+}
73718+
73719+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
73720+{
73721+ __u32 mode;
73722+
73723+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
73724+
73725+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73726+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
73727+ return mode;
73728+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73729+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
73730+ return 0;
73731+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73732+ return 0;
73733+
73734+ return (reqmode);
73735+}
73736+
73737+__u32
73738+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
73739+{
73740+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
73741+}
73742+
73743+__u32
73744+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
73745+{
73746+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
73747+}
73748+
73749+__u32
73750+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
73751+{
73752+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
73753+}
73754+
73755+__u32
73756+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
73757+{
73758+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
73759+}
73760+
73761+__u32
73762+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
73763+ umode_t *modeptr)
73764+{
73765+ umode_t mode;
73766+
73767+ *modeptr &= ~gr_acl_umask();
73768+ mode = *modeptr;
73769+
73770+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
73771+ return 1;
73772+
73773+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
73774+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
73775+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
73776+ GR_CHMOD_ACL_MSG);
73777+ } else {
73778+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
73779+ }
73780+}
73781+
73782+__u32
73783+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
73784+{
73785+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
73786+}
73787+
73788+__u32
73789+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
73790+{
73791+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
73792+}
73793+
73794+__u32
73795+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
73796+{
73797+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
73798+}
73799+
73800+__u32
73801+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
73802+{
73803+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
73804+}
73805+
73806+__u32
73807+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
73808+{
73809+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
73810+ GR_UNIXCONNECT_ACL_MSG);
73811+}
73812+
73813+/* hardlinks require at minimum create and link permission,
73814+ any additional privilege required is based on the
73815+ privilege of the file being linked to
73816+*/
73817+__u32
73818+gr_acl_handle_link(const struct dentry * new_dentry,
73819+ const struct dentry * parent_dentry,
73820+ const struct vfsmount * parent_mnt,
73821+ const struct dentry * old_dentry,
73822+ const struct vfsmount * old_mnt, const struct filename *to)
73823+{
73824+ __u32 mode;
73825+ __u32 needmode = GR_CREATE | GR_LINK;
73826+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
73827+
73828+ mode =
73829+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
73830+ old_mnt);
73831+
73832+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
73833+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73834+ return mode;
73835+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73836+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73837+ return 0;
73838+ } else if (unlikely((mode & needmode) != needmode))
73839+ return 0;
73840+
73841+ return 1;
73842+}
73843+
73844+__u32
73845+gr_acl_handle_symlink(const struct dentry * new_dentry,
73846+ const struct dentry * parent_dentry,
73847+ const struct vfsmount * parent_mnt, const struct filename *from)
73848+{
73849+ __u32 needmode = GR_WRITE | GR_CREATE;
73850+ __u32 mode;
73851+
73852+ mode =
73853+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
73854+ GR_CREATE | GR_AUDIT_CREATE |
73855+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
73856+
73857+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
73858+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73859+ return mode;
73860+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73861+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73862+ return 0;
73863+ } else if (unlikely((mode & needmode) != needmode))
73864+ return 0;
73865+
73866+ return (GR_WRITE | GR_CREATE);
73867+}
73868+
73869+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
73870+{
73871+ __u32 mode;
73872+
73873+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73874+
73875+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73876+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
73877+ return mode;
73878+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73879+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
73880+ return 0;
73881+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73882+ return 0;
73883+
73884+ return (reqmode);
73885+}
73886+
73887+__u32
73888+gr_acl_handle_mknod(const struct dentry * new_dentry,
73889+ const struct dentry * parent_dentry,
73890+ const struct vfsmount * parent_mnt,
73891+ const int mode)
73892+{
73893+ __u32 reqmode = GR_WRITE | GR_CREATE;
73894+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73895+ reqmode |= GR_SETID;
73896+
73897+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73898+ reqmode, GR_MKNOD_ACL_MSG);
73899+}
73900+
73901+__u32
73902+gr_acl_handle_mkdir(const struct dentry *new_dentry,
73903+ const struct dentry *parent_dentry,
73904+ const struct vfsmount *parent_mnt)
73905+{
73906+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73907+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
73908+}
73909+
73910+#define RENAME_CHECK_SUCCESS(old, new) \
73911+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
73912+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
73913+
73914+int
73915+gr_acl_handle_rename(struct dentry *new_dentry,
73916+ struct dentry *parent_dentry,
73917+ const struct vfsmount *parent_mnt,
73918+ struct dentry *old_dentry,
73919+ struct inode *old_parent_inode,
73920+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
73921+{
73922+ __u32 comp1, comp2;
73923+ int error = 0;
73924+
73925+ if (unlikely(!gr_acl_is_enabled()))
73926+ return 0;
73927+
73928+ if (flags & RENAME_EXCHANGE) {
73929+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73930+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73931+ GR_SUPPRESS, parent_mnt);
73932+ comp2 =
73933+ gr_search_file(old_dentry,
73934+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73935+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73936+ } else if (d_is_negative(new_dentry)) {
73937+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
73938+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
73939+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
73940+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
73941+ GR_DELETE | GR_AUDIT_DELETE |
73942+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73943+ GR_SUPPRESS, old_mnt);
73944+ } else {
73945+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73946+ GR_CREATE | GR_DELETE |
73947+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73948+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73949+ GR_SUPPRESS, parent_mnt);
73950+ comp2 =
73951+ gr_search_file(old_dentry,
73952+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73953+ GR_DELETE | GR_AUDIT_DELETE |
73954+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73955+ }
73956+
73957+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73958+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73959+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73960+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73961+ && !(comp2 & GR_SUPPRESS)) {
73962+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73963+ error = -EACCES;
73964+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73965+ error = -EACCES;
73966+
73967+ return error;
73968+}
73969+
73970+void
73971+gr_acl_handle_exit(void)
73972+{
73973+ u16 id;
73974+ char *rolename;
73975+
73976+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73977+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73978+ id = current->acl_role_id;
73979+ rolename = current->role->rolename;
73980+ gr_set_acls(1);
73981+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73982+ }
73983+
73984+ gr_put_exec_file(current);
73985+ return;
73986+}
73987+
73988+int
73989+gr_acl_handle_procpidmem(const struct task_struct *task)
73990+{
73991+ if (unlikely(!gr_acl_is_enabled()))
73992+ return 0;
73993+
73994+ if (task != current && task->acl->mode & GR_PROTPROCFD)
73995+ return -EACCES;
73996+
73997+ return 0;
73998+}
73999diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
74000new file mode 100644
74001index 0000000..f056b81
74002--- /dev/null
74003+++ b/grsecurity/gracl_ip.c
74004@@ -0,0 +1,386 @@
74005+#include <linux/kernel.h>
74006+#include <asm/uaccess.h>
74007+#include <asm/errno.h>
74008+#include <net/sock.h>
74009+#include <linux/file.h>
74010+#include <linux/fs.h>
74011+#include <linux/net.h>
74012+#include <linux/in.h>
74013+#include <linux/skbuff.h>
74014+#include <linux/ip.h>
74015+#include <linux/udp.h>
74016+#include <linux/types.h>
74017+#include <linux/sched.h>
74018+#include <linux/netdevice.h>
74019+#include <linux/inetdevice.h>
74020+#include <linux/gracl.h>
74021+#include <linux/grsecurity.h>
74022+#include <linux/grinternal.h>
74023+
74024+#define GR_BIND 0x01
74025+#define GR_CONNECT 0x02
74026+#define GR_INVERT 0x04
74027+#define GR_BINDOVERRIDE 0x08
74028+#define GR_CONNECTOVERRIDE 0x10
74029+#define GR_SOCK_FAMILY 0x20
74030+
74031+static const char * gr_protocols[IPPROTO_MAX] = {
74032+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
74033+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
74034+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
74035+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
74036+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
74037+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
74038+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
74039+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
74040+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
74041+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
74042+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
74043+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
74044+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
74045+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
74046+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
74047+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
74048+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
74049+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
74050+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
74051+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
74052+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
74053+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
74054+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
74055+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
74056+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
74057+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
74058+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
74059+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
74060+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
74061+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
74062+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
74063+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
74064+ };
74065+
74066+static const char * gr_socktypes[SOCK_MAX] = {
74067+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
74068+ "unknown:7", "unknown:8", "unknown:9", "packet"
74069+ };
74070+
74071+static const char * gr_sockfamilies[AF_MAX+1] = {
74072+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
74073+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
74074+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
74075+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
74076+ };
74077+
74078+const char *
74079+gr_proto_to_name(unsigned char proto)
74080+{
74081+ return gr_protocols[proto];
74082+}
74083+
74084+const char *
74085+gr_socktype_to_name(unsigned char type)
74086+{
74087+ return gr_socktypes[type];
74088+}
74089+
74090+const char *
74091+gr_sockfamily_to_name(unsigned char family)
74092+{
74093+ return gr_sockfamilies[family];
74094+}
74095+
74096+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
74097+
74098+int
74099+gr_search_socket(const int domain, const int type, const int protocol)
74100+{
74101+ struct acl_subject_label *curr;
74102+ const struct cred *cred = current_cred();
74103+
74104+ if (unlikely(!gr_acl_is_enabled()))
74105+ goto exit;
74106+
74107+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
74108+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
74109+ goto exit; // let the kernel handle it
74110+
74111+ curr = current->acl;
74112+
74113+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
74114+ /* the family is allowed, if this is PF_INET allow it only if
74115+ the extra sock type/protocol checks pass */
74116+ if (domain == PF_INET)
74117+ goto inet_check;
74118+ goto exit;
74119+ } else {
74120+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74121+ __u32 fakeip = 0;
74122+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74123+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74124+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74125+ gr_to_filename(current->exec_file->f_path.dentry,
74126+ current->exec_file->f_path.mnt) :
74127+ curr->filename, curr->filename,
74128+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
74129+ &current->signal->saved_ip);
74130+ goto exit;
74131+ }
74132+ goto exit_fail;
74133+ }
74134+
74135+inet_check:
74136+ /* the rest of this checking is for IPv4 only */
74137+ if (!curr->ips)
74138+ goto exit;
74139+
74140+ if ((curr->ip_type & (1U << type)) &&
74141+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
74142+ goto exit;
74143+
74144+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74145+		/* we don't place acls on raw sockets, and sometimes
74146+ dgram/ip sockets are opened for ioctl and not
74147+ bind/connect, so we'll fake a bind learn log */
74148+ if (type == SOCK_RAW || type == SOCK_PACKET) {
74149+ __u32 fakeip = 0;
74150+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74151+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74152+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74153+ gr_to_filename(current->exec_file->f_path.dentry,
74154+ current->exec_file->f_path.mnt) :
74155+ curr->filename, curr->filename,
74156+ &fakeip, 0, type,
74157+ protocol, GR_CONNECT, &current->signal->saved_ip);
74158+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
74159+ __u32 fakeip = 0;
74160+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74161+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74162+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74163+ gr_to_filename(current->exec_file->f_path.dentry,
74164+ current->exec_file->f_path.mnt) :
74165+ curr->filename, curr->filename,
74166+ &fakeip, 0, type,
74167+ protocol, GR_BIND, &current->signal->saved_ip);
74168+ }
74169+ /* we'll log when they use connect or bind */
74170+ goto exit;
74171+ }
74172+
74173+exit_fail:
74174+ if (domain == PF_INET)
74175+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
74176+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
74177+ else if (rcu_access_pointer(net_families[domain]) != NULL)
74178+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
74179+ gr_socktype_to_name(type), protocol);
74180+
74181+ return 0;
74182+exit:
74183+ return 1;
74184+}
74185+
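
The sock_families test in gr_search_socket() above is a plain bitmap membership check: families are packed one bit per 32-bit word, indexed by family / 32 with bit family % 32 selected inside the word. A minimal userspace sketch of the same scheme (the FAM_MAX bound and the helper names here are illustrative, not taken from the kernel):

#include <stdio.h>

#define FAM_MAX   41                      /* illustrative bound, not the kernel's AF_MAX */
#define FAM_WORDS ((FAM_MAX + 31) / 32)

static unsigned int fam_bits[FAM_WORDS];

static void fam_allow(int family)
{
	fam_bits[family / 32] |= (1U << (family % 32));
}

static int fam_allowed(int family)
{
	return (fam_bits[family / 32] & (1U << (family % 32))) != 0;
}

int main(void)
{
	fam_allow(2);   /* AF_INET */
	fam_allow(10);  /* AF_INET6 */
	printf("inet: %d ax25: %d\n", fam_allowed(2), fam_allowed(3)); /* prints 1 0 */
	return 0;
}
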
74186+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
74187+{
74188+ if ((ip->mode & mode) &&
74189+ (ip_port >= ip->low) &&
74190+ (ip_port <= ip->high) &&
74191+ ((ntohl(ip_addr) & our_netmask) ==
74192+ (ntohl(our_addr) & our_netmask))
74193+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
74194+ && (ip->type & (1U << type))) {
74195+ if (ip->mode & GR_INVERT)
74196+ return 2; // specifically denied
74197+ else
74198+ return 1; // allowed
74199+ }
74200+
74201+ return 0; // not specifically allowed, may continue parsing
74202+}
74203+
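
check_ip_policy() above is deliberately tri-state: 0 means this rule has no opinion (keep scanning), 1 means allowed, and 2 means a GR_INVERT rule matched, i.e. specifically denied. A simplified userspace rendering of that first-match scan, with an invented rule struct standing in for acl_ip_label:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* simplified rule: one subnet, a port range, and an invert flag */
struct ip_rule {
	uint32_t addr;      /* network byte order, like acl_ip_label */
	uint32_t netmask;   /* host byte order */
	uint16_t low, high;
	int      invert;
};

/* 0 = no match (keep scanning), 1 = allowed, 2 = specifically denied */
static int match_rule(const struct ip_rule *r, uint32_t ip, uint16_t port)
{
	if (port >= r->low && port <= r->high &&
	    (ntohl(ip) & r->netmask) == (ntohl(r->addr) & r->netmask))
		return r->invert ? 2 : 1;
	return 0;
}

int main(void)
{
	struct ip_rule rules[] = {
		{ htonl(0xc0a80100), 0xffffff00, 22, 22, 1 },   /* deny 192.168.1.0/24:22 */
		{ htonl(0xc0a80000), 0xffff0000, 1, 65535, 0 }, /* allow 192.168/16 */
	};
	uint32_t ip = htonl(0xc0a80105); /* 192.168.1.5 */
	unsigned int i;

	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		int ret = match_rule(&rules[i], ip, 22);
		if (ret == 1) { puts("allowed"); return 0; }
		if (ret == 2) { puts("specifically denied"); return 1; }
	}
	puts("no match: default deny");
	return 1;
}
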
74204+static int
74205+gr_search_connectbind(const int full_mode, struct sock *sk,
74206+ struct sockaddr_in *addr, const int type)
74207+{
74208+ char iface[IFNAMSIZ] = {0};
74209+ struct acl_subject_label *curr;
74210+ struct acl_ip_label *ip;
74211+ struct inet_sock *isk;
74212+ struct net_device *dev;
74213+ struct in_device *idev;
74214+ unsigned long i;
74215+ int ret;
74216+ int mode = full_mode & (GR_BIND | GR_CONNECT);
74217+ __u32 ip_addr = 0;
74218+ __u32 our_addr;
74219+ __u32 our_netmask;
74220+ char *p;
74221+ __u16 ip_port = 0;
74222+ const struct cred *cred = current_cred();
74223+
74224+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
74225+ return 0;
74226+
74227+ curr = current->acl;
74228+ isk = inet_sk(sk);
74229+
74230+	/* INADDR_ANY override for binds; inaddr_any_override is already in network order */
74231+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
74232+ addr->sin_addr.s_addr = curr->inaddr_any_override;
74233+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
74234+ struct sockaddr_in saddr;
74235+ int err;
74236+
74237+ saddr.sin_family = AF_INET;
74238+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
74239+ saddr.sin_port = isk->inet_sport;
74240+
74241+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74242+ if (err)
74243+ return err;
74244+
74245+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74246+ if (err)
74247+ return err;
74248+ }
74249+
74250+ if (!curr->ips)
74251+ return 0;
74252+
74253+ ip_addr = addr->sin_addr.s_addr;
74254+ ip_port = ntohs(addr->sin_port);
74255+
74256+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74257+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74258+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74259+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74260+ gr_to_filename(current->exec_file->f_path.dentry,
74261+ current->exec_file->f_path.mnt) :
74262+ curr->filename, curr->filename,
74263+ &ip_addr, ip_port, type,
74264+ sk->sk_protocol, mode, &current->signal->saved_ip);
74265+ return 0;
74266+ }
74267+
74268+ for (i = 0; i < curr->ip_num; i++) {
74269+ ip = *(curr->ips + i);
74270+ if (ip->iface != NULL) {
74271+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
74272+ p = strchr(iface, ':');
74273+ if (p != NULL)
74274+ *p = '\0';
74275+ dev = dev_get_by_name(sock_net(sk), iface);
74276+ if (dev == NULL)
74277+ continue;
74278+ idev = in_dev_get(dev);
74279+ if (idev == NULL) {
74280+ dev_put(dev);
74281+ continue;
74282+ }
74283+ rcu_read_lock();
74284+ for_ifa(idev) {
74285+ if (!strcmp(ip->iface, ifa->ifa_label)) {
74286+ our_addr = ifa->ifa_address;
74287+ our_netmask = 0xffffffff;
74288+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74289+ if (ret == 1) {
74290+ rcu_read_unlock();
74291+ in_dev_put(idev);
74292+ dev_put(dev);
74293+ return 0;
74294+ } else if (ret == 2) {
74295+ rcu_read_unlock();
74296+ in_dev_put(idev);
74297+ dev_put(dev);
74298+ goto denied;
74299+ }
74300+ }
74301+ } endfor_ifa(idev);
74302+ rcu_read_unlock();
74303+ in_dev_put(idev);
74304+ dev_put(dev);
74305+ } else {
74306+ our_addr = ip->addr;
74307+ our_netmask = ip->netmask;
74308+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74309+ if (ret == 1)
74310+ return 0;
74311+ else if (ret == 2)
74312+ goto denied;
74313+ }
74314+ }
74315+
74316+denied:
74317+ if (mode == GR_BIND)
74318+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74319+ else if (mode == GR_CONNECT)
74320+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74321+
74322+ return -EACCES;
74323+}
74324+
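
One detail worth noting in the interface branch of gr_search_connectbind() above: a policy label may name an alias such as "eth0:1", but device lookup needs the base name, so the label is copied and truncated at the colon before dev_get_by_name(), while the untruncated label is what gets matched against ifa_label. A small userspace sketch of just the truncation step (the helper name is illustrative):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* given a policy label like "eth0:1", the base device name is the part
   before the alias colon; the full label is still used for ifa_label matching */
static void base_dev_name(const char *label, char *dev, size_t len)
{
	char *p;

	strncpy(dev, label, len - 1);
	dev[len - 1] = '\0';
	p = strchr(dev, ':');
	if (p)
		*p = '\0';
}

int main(void)
{
	char dev[IFNAMSIZ];

	base_dev_name("eth0:1", dev, sizeof(dev));
	printf("lookup device '%s' for label 'eth0:1'\n", dev); /* eth0 */
	return 0;
}
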
74325+int
74326+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
74327+{
74328+ /* always allow disconnection of dgram sockets with connect */
74329+ if (addr->sin_family == AF_UNSPEC)
74330+ return 0;
74331+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
74332+}
74333+
74334+int
74335+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
74336+{
74337+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
74338+}
74339+
74340+int gr_search_listen(struct socket *sock)
74341+{
74342+ struct sock *sk = sock->sk;
74343+ struct sockaddr_in addr;
74344+
74345+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74346+ addr.sin_port = inet_sk(sk)->inet_sport;
74347+
74348+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74349+}
74350+
74351+int gr_search_accept(struct socket *sock)
74352+{
74353+ struct sock *sk = sock->sk;
74354+ struct sockaddr_in addr;
74355+
74356+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74357+ addr.sin_port = inet_sk(sk)->inet_sport;
74358+
74359+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74360+}
74361+
74362+int
74363+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
74364+{
74365+ if (addr)
74366+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
74367+ else {
74368+ struct sockaddr_in sin;
74369+ const struct inet_sock *inet = inet_sk(sk);
74370+
74371+ sin.sin_addr.s_addr = inet->inet_daddr;
74372+ sin.sin_port = inet->inet_dport;
74373+
74374+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74375+ }
74376+}
74377+
74378+int
74379+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
74380+{
74381+ struct sockaddr_in sin;
74382+
74383+ if (unlikely(skb->len < sizeof (struct udphdr)))
74384+ return 0; // skip this packet
74385+
74386+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
74387+ sin.sin_port = udp_hdr(skb)->source;
74388+
74389+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74390+}
74391diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
74392new file mode 100644
74393index 0000000..25f54ef
74394--- /dev/null
74395+++ b/grsecurity/gracl_learn.c
74396@@ -0,0 +1,207 @@
74397+#include <linux/kernel.h>
74398+#include <linux/mm.h>
74399+#include <linux/sched.h>
74400+#include <linux/poll.h>
74401+#include <linux/string.h>
74402+#include <linux/file.h>
74403+#include <linux/types.h>
74404+#include <linux/vmalloc.h>
74405+#include <linux/grinternal.h>
74406+
74407+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
74408+ size_t count, loff_t *ppos);
74409+extern int gr_acl_is_enabled(void);
74410+
74411+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
74412+static int gr_learn_attached;
74413+
74414+/* use a 512k buffer */
74415+#define LEARN_BUFFER_SIZE (512 * 1024)
74416+
74417+static DEFINE_SPINLOCK(gr_learn_lock);
74418+static DEFINE_MUTEX(gr_learn_user_mutex);
74419+
74420+/* we need to maintain two buffers, so that the reader context of grlearn
74421+   takes a mutex around the copy to userspace, and the other kernel contexts
74422+   take a spinlock when copying into the buffer, since they cannot sleep
74423+*/
74424+static char *learn_buffer;
74425+static char *learn_buffer_user;
74426+static int learn_buffer_len;
74427+static int learn_buffer_user_len;
74428+
74429+static ssize_t
74430+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
74431+{
74432+ DECLARE_WAITQUEUE(wait, current);
74433+ ssize_t retval = 0;
74434+
74435+ add_wait_queue(&learn_wait, &wait);
74436+ set_current_state(TASK_INTERRUPTIBLE);
74437+ do {
74438+ mutex_lock(&gr_learn_user_mutex);
74439+ spin_lock(&gr_learn_lock);
74440+ if (learn_buffer_len)
74441+ break;
74442+ spin_unlock(&gr_learn_lock);
74443+ mutex_unlock(&gr_learn_user_mutex);
74444+ if (file->f_flags & O_NONBLOCK) {
74445+ retval = -EAGAIN;
74446+ goto out;
74447+ }
74448+ if (signal_pending(current)) {
74449+ retval = -ERESTARTSYS;
74450+ goto out;
74451+ }
74452+
74453+ schedule();
74454+ } while (1);
74455+
74456+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
74457+ learn_buffer_user_len = learn_buffer_len;
74458+ retval = learn_buffer_len;
74459+ learn_buffer_len = 0;
74460+
74461+ spin_unlock(&gr_learn_lock);
74462+
74463+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
74464+ retval = -EFAULT;
74465+
74466+ mutex_unlock(&gr_learn_user_mutex);
74467+out:
74468+ set_current_state(TASK_RUNNING);
74469+ remove_wait_queue(&learn_wait, &wait);
74470+ return retval;
74471+}
74472+
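
read_learn() above is the classic blocking-read skeleton: register on the wait queue, recheck the condition after every wakeup, and bail out early for O_NONBLOCK callers or a pending signal. A rough userspace analogue, with a POSIX condition variable standing in for the kernel wait queue (the function names and the 42-byte payload are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int data_len;

static int blocking_read(int nonblock)
{
	int n;

	pthread_mutex_lock(&lock);
	while (data_len == 0) {
		if (nonblock) {
			pthread_mutex_unlock(&lock);
			return -1;               /* -EAGAIN in the kernel */
		}
		pthread_cond_wait(&cond, &lock); /* recheck on every wakeup */
	}
	n = data_len;
	data_len = 0;
	pthread_mutex_unlock(&lock);
	return n;
}

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	data_len = 42;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&cond);
	return NULL;
}

int main(void)
{
	pthread_t t;

	printf("nonblocking read: %d\n", blocking_read(1)); /* -1: no data yet */
	pthread_create(&t, NULL, producer, NULL);
	printf("blocking read: %d\n", blocking_read(0));    /* 42 */
	pthread_join(t, NULL);
	return 0;
}
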
74473+static unsigned int
74474+poll_learn(struct file * file, poll_table * wait)
74475+{
74476+ poll_wait(file, &learn_wait, wait);
74477+
74478+ if (learn_buffer_len)
74479+ return (POLLIN | POLLRDNORM);
74480+
74481+ return 0;
74482+}
74483+
74484+void
74485+gr_clear_learn_entries(void)
74486+{
74487+ char *tmp;
74488+
74489+ mutex_lock(&gr_learn_user_mutex);
74490+ spin_lock(&gr_learn_lock);
74491+ tmp = learn_buffer;
74492+ learn_buffer = NULL;
74493+ spin_unlock(&gr_learn_lock);
74494+ if (tmp)
74495+ vfree(tmp);
74496+ if (learn_buffer_user != NULL) {
74497+ vfree(learn_buffer_user);
74498+ learn_buffer_user = NULL;
74499+ }
74500+ learn_buffer_len = 0;
74501+ mutex_unlock(&gr_learn_user_mutex);
74502+
74503+ return;
74504+}
74505+
74506+void
74507+gr_add_learn_entry(const char *fmt, ...)
74508+{
74509+ va_list args;
74510+ unsigned int len;
74511+
74512+ if (!gr_learn_attached)
74513+ return;
74514+
74515+ spin_lock(&gr_learn_lock);
74516+
74517+ /* leave a gap at the end so we know when it's "full" but don't have to
74518+ compute the exact length of the string we're trying to append
74519+ */
74520+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
74521+ spin_unlock(&gr_learn_lock);
74522+ wake_up_interruptible(&learn_wait);
74523+ return;
74524+ }
74525+ if (learn_buffer == NULL) {
74526+ spin_unlock(&gr_learn_lock);
74527+ return;
74528+ }
74529+
74530+ va_start(args, fmt);
74531+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
74532+ va_end(args);
74533+
74534+ learn_buffer_len += len + 1;
74535+
74536+ spin_unlock(&gr_learn_lock);
74537+ wake_up_interruptible(&learn_wait);
74538+
74539+ return;
74540+}
74541+
74542+static int
74543+open_learn(struct inode *inode, struct file *file)
74544+{
74545+ if (file->f_mode & FMODE_READ && gr_learn_attached)
74546+ return -EBUSY;
74547+ if (file->f_mode & FMODE_READ) {
74548+ int retval = 0;
74549+ mutex_lock(&gr_learn_user_mutex);
74550+ if (learn_buffer == NULL)
74551+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
74552+ if (learn_buffer_user == NULL)
74553+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
74554+ if (learn_buffer == NULL) {
74555+ retval = -ENOMEM;
74556+ goto out_error;
74557+ }
74558+ if (learn_buffer_user == NULL) {
74559+ retval = -ENOMEM;
74560+ goto out_error;
74561+ }
74562+ learn_buffer_len = 0;
74563+ learn_buffer_user_len = 0;
74564+ gr_learn_attached = 1;
74565+out_error:
74566+ mutex_unlock(&gr_learn_user_mutex);
74567+ return retval;
74568+ }
74569+ return 0;
74570+}
74571+
74572+static int
74573+close_learn(struct inode *inode, struct file *file)
74574+{
74575+ if (file->f_mode & FMODE_READ) {
74576+ char *tmp = NULL;
74577+ mutex_lock(&gr_learn_user_mutex);
74578+ spin_lock(&gr_learn_lock);
74579+ tmp = learn_buffer;
74580+ learn_buffer = NULL;
74581+ spin_unlock(&gr_learn_lock);
74582+ if (tmp)
74583+ vfree(tmp);
74584+ if (learn_buffer_user != NULL) {
74585+ vfree(learn_buffer_user);
74586+ learn_buffer_user = NULL;
74587+ }
74588+ learn_buffer_len = 0;
74589+ learn_buffer_user_len = 0;
74590+ gr_learn_attached = 0;
74591+ mutex_unlock(&gr_learn_user_mutex);
74592+ }
74593+
74594+ return 0;
74595+}
74596+
74597+const struct file_operations grsec_fops = {
74598+ .read = read_learn,
74599+ .write = write_grsec_handler,
74600+ .open = open_learn,
74601+ .release = close_learn,
74602+ .poll = poll_learn,
74603+};
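
The two-buffer arrangement in this file keeps the atomic and sleepable worlds apart: producers append to learn_buffer under a spinlock and never sleep, while the single reader snapshots into learn_buffer_user under a mutex and can then take as long as copy_to_user needs without stalling producers. A userspace sketch of the same split, with pthread primitives standing in for the kernel's and made-up buffer sizes:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096

static pthread_spinlock_t prod_lock;  /* producers: may not sleep */
static pthread_mutex_t cons_lock = PTHREAD_MUTEX_INITIALIZER; /* single consumer */
static char buf[BUF_SZ], snap[BUF_SZ];
static size_t buf_len;

static void produce(const char *msg)
{
	size_t n = strlen(msg) + 1;

	pthread_spin_lock(&prod_lock);
	if (buf_len + n <= BUF_SZ) {
		memcpy(buf + buf_len, msg, n);
		buf_len += n;
	}
	pthread_spin_unlock(&prod_lock);
}

static size_t consume(void)
{
	size_t n;

	pthread_mutex_lock(&cons_lock);
	pthread_spin_lock(&prod_lock);
	n = buf_len;                      /* fast snapshot under the spinlock */
	memcpy(snap, buf, n);
	buf_len = 0;
	pthread_spin_unlock(&prod_lock);
	/* the slow copy-out (copy_to_user in the kernel) happens here with
	   only the mutex held, so producers are never blocked by it */
	pthread_mutex_unlock(&cons_lock);
	return n;
}

int main(void)
{
	pthread_spin_init(&prod_lock, PTHREAD_PROCESS_PRIVATE);
	produce("event one");
	produce("event two");
	printf("drained %zu bytes\n", consume());
	return 0;
}
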
74604diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
74605new file mode 100644
74606index 0000000..3f8ade0
74607--- /dev/null
74608+++ b/grsecurity/gracl_policy.c
74609@@ -0,0 +1,1782 @@
74610+#include <linux/kernel.h>
74611+#include <linux/module.h>
74612+#include <linux/sched.h>
74613+#include <linux/mm.h>
74614+#include <linux/file.h>
74615+#include <linux/fs.h>
74616+#include <linux/namei.h>
74617+#include <linux/mount.h>
74618+#include <linux/tty.h>
74619+#include <linux/proc_fs.h>
74620+#include <linux/lglock.h>
74621+#include <linux/slab.h>
74622+#include <linux/vmalloc.h>
74623+#include <linux/types.h>
74624+#include <linux/sysctl.h>
74625+#include <linux/netdevice.h>
74626+#include <linux/ptrace.h>
74627+#include <linux/gracl.h>
74628+#include <linux/gralloc.h>
74629+#include <linux/security.h>
74630+#include <linux/grinternal.h>
74631+#include <linux/pid_namespace.h>
74632+#include <linux/stop_machine.h>
74633+#include <linux/fdtable.h>
74634+#include <linux/percpu.h>
74635+#include <linux/lglock.h>
74636+#include <linux/hugetlb.h>
74637+#include <linux/posix-timers.h>
74638+#include "../fs/mount.h"
74639+
74640+#include <asm/uaccess.h>
74641+#include <asm/errno.h>
74642+#include <asm/mman.h>
74643+
74644+extern struct gr_policy_state *polstate;
74645+
74646+#define FOR_EACH_ROLE_START(role) \
74647+ role = polstate->role_list; \
74648+ while (role) {
74649+
74650+#define FOR_EACH_ROLE_END(role) \
74651+ role = role->prev; \
74652+ }
74653+
74654+struct path gr_real_root;
74655+
74656+extern struct gr_alloc_state *current_alloc_state;
74657+
74658+u16 acl_sp_role_value;
74659+
74660+static DEFINE_MUTEX(gr_dev_mutex);
74661+
74662+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74663+extern void gr_clear_learn_entries(void);
74664+
74665+struct gr_arg *gr_usermode __read_only;
74666+unsigned char *gr_system_salt __read_only;
74667+unsigned char *gr_system_sum __read_only;
74668+
74669+static unsigned int gr_auth_attempts = 0;
74670+static unsigned long gr_auth_expires = 0UL;
74671+
74672+struct acl_object_label *fakefs_obj_rw;
74673+struct acl_object_label *fakefs_obj_rwx;
74674+
74675+extern int gr_init_uidset(void);
74676+extern void gr_free_uidset(void);
74677+extern void gr_remove_uid(uid_t uid);
74678+extern int gr_find_uid(uid_t uid);
74679+
74680+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
74681+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
74682+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
74683+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
74684+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
74685+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
74686+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
74687+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
74688+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
74689+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
74690+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
74691+extern void assign_special_role(const char *rolename);
74692+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
74693+extern int gr_rbac_disable(void *unused);
74694+extern void gr_enable_rbac_system(void);
74695+
74696+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
74697+{
74698+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
74699+ return -EFAULT;
74700+
74701+ return 0;
74702+}
74703+
74704+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74705+{
74706+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
74707+ return -EFAULT;
74708+
74709+ return 0;
74710+}
74711+
74712+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
74713+{
74714+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
74715+ return -EFAULT;
74716+
74717+ return 0;
74718+}
74719+
74720+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
74721+{
74722+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
74723+ return -EFAULT;
74724+
74725+ return 0;
74726+}
74727+
74728+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
74729+{
74730+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
74731+ return -EFAULT;
74732+
74733+ return 0;
74734+}
74735+
74736+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74737+{
74738+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
74739+ return -EFAULT;
74740+
74741+ return 0;
74742+}
74743+
74744+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
74745+{
74746+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
74747+ return -EFAULT;
74748+
74749+ return 0;
74750+}
74751+
74752+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
74753+{
74754+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
74755+ return -EFAULT;
74756+
74757+ return 0;
74758+}
74759+
74760+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
74761+{
74762+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
74763+ return -EFAULT;
74764+
74765+ return 0;
74766+}
74767+
74768+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
74769+{
74770+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
74771+ return -EFAULT;
74772+
74773+ if (((uwrap->version != GRSECURITY_VERSION) &&
74774+ (uwrap->version != 0x2901)) ||
74775+ (uwrap->size != sizeof(struct gr_arg)))
74776+ return -EINVAL;
74777+
74778+ return 0;
74779+}
74780+
74781+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
74782+{
74783+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
74784+ return -EFAULT;
74785+
74786+ return 0;
74787+}
74788+
74789+static size_t get_gr_arg_wrapper_size_normal(void)
74790+{
74791+ return sizeof(struct gr_arg_wrapper);
74792+}
74793+
74794+#ifdef CONFIG_COMPAT
74795+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
74796+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
74797+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
74798+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
74799+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
74800+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
74801+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
74802+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
74803+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
74804+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
74805+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
74806+extern size_t get_gr_arg_wrapper_size_compat(void);
74807+
74808+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
74809+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
74810+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
74811+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
74812+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
74813+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
74814+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
74815+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
74816+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
74817+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
74818+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
74819+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
74820+
74821+#else
74822+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
74823+#define copy_gr_arg copy_gr_arg_normal
74824+#define copy_gr_hash_struct copy_gr_hash_struct_normal
74825+#define copy_acl_object_label copy_acl_object_label_normal
74826+#define copy_acl_subject_label copy_acl_subject_label_normal
74827+#define copy_acl_role_label copy_acl_role_label_normal
74828+#define copy_acl_ip_label copy_acl_ip_label_normal
74829+#define copy_pointer_from_array copy_pointer_from_array_normal
74830+#define copy_sprole_pw copy_sprole_pw_normal
74831+#define copy_role_transition copy_role_transition_normal
74832+#define copy_role_allowed_ip copy_role_allowed_ip_normal
74833+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
74834+#endif
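
The pattern above costs nothing when CONFIG_COMPAT is off (the names collapse to the _normal routines via #define) and becomes a one-time function-pointer selection when it is on. A toy illustration of the same dispatch, using invented stand-ins (struct arg, is_compat_task) rather than the real gr_arg machinery:

#include <stdio.h>
#include <string.h>

struct arg { int version; };

static int copy_arg_normal(struct arg *dst, const void *src)
{
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

static int copy_arg_compat(struct arg *dst, const void *src)
{
	/* a real compat routine would translate a 32-bit layout here */
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

/* with compat support, a pointer selected once; without it, the kernel
   code simply #defines the name to the _normal routine */
static int (*copy_arg)(struct arg *, const void *);

int main(void)
{
	int is_compat_task = 0; /* illustrative; the kernel keys off the loading task */
	struct arg a, src = { 0x2901 };

	copy_arg = is_compat_task ? copy_arg_compat : copy_arg_normal;
	copy_arg(&a, &src);
	printf("version %#x\n", (unsigned int)a.version);
	return 0;
}
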
74835+
74836+static struct acl_subject_label *
74837+lookup_subject_map(const struct acl_subject_label *userp)
74838+{
74839+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
74840+ struct subject_map *match;
74841+
74842+ match = polstate->subj_map_set.s_hash[index];
74843+
74844+ while (match && match->user != userp)
74845+ match = match->next;
74846+
74847+ if (match != NULL)
74848+ return match->kernel;
74849+ else
74850+ return NULL;
74851+}
74852+
74853+static void
74854+insert_subj_map_entry(struct subject_map *subjmap)
74855+{
74856+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
74857+ struct subject_map **curr;
74858+
74859+ subjmap->prev = NULL;
74860+
74861+ curr = &polstate->subj_map_set.s_hash[index];
74862+ if (*curr != NULL)
74863+ (*curr)->prev = subjmap;
74864+
74865+ subjmap->next = *curr;
74866+ *curr = subjmap;
74867+
74868+ return;
74869+}
74870+
74871+static void
74872+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
74873+{
74874+ unsigned int index =
74875+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
74876+ struct acl_role_label **curr;
74877+ struct acl_role_label *tmp, *tmp2;
74878+
74879+ curr = &polstate->acl_role_set.r_hash[index];
74880+
74881+ /* simple case, slot is empty, just set it to our role */
74882+ if (*curr == NULL) {
74883+ *curr = role;
74884+ } else {
74885+ /* example:
74886+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
74887+ 2 -> 3
74888+ */
74889+ /* first check to see if we can already be reached via this slot */
74890+ tmp = *curr;
74891+ while (tmp && tmp != role)
74892+ tmp = tmp->next;
74893+ if (tmp == role) {
74894+ /* we don't need to add ourselves to this slot's chain */
74895+ return;
74896+ }
74897+ /* we need to add ourselves to this chain, two cases */
74898+ if (role->next == NULL) {
74899+ /* simple case, append the current chain to our role */
74900+ role->next = *curr;
74901+ *curr = role;
74902+ } else {
74903+ /* 1 -> 2 -> 3 -> 4
74904+ 2 -> 3 -> 4
74905+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
74906+ */
74907+ /* trickier case: walk our role's chain until we find
74908+ the role for the start of the current slot's chain */
74909+ tmp = role;
74910+ tmp2 = *curr;
74911+ while (tmp->next && tmp->next != tmp2)
74912+ tmp = tmp->next;
74913+ if (tmp->next == tmp2) {
74914+ /* from example above, we found 3, so just
74915+ replace this slot's chain with ours */
74916+ *curr = role;
74917+ } else {
74918+ /* we didn't find a subset of our role's chain
74919+ in the current slot's chain, so append their
74920+ chain to ours, and set us as the first role in
74921+ the slot's chain
74922+
74923+ we could fold this case with the case above,
74924+ but making it explicit for clarity
74925+ */
74926+ tmp->next = tmp2;
74927+ *curr = role;
74928+ }
74929+ }
74930+ }
74931+
74932+ return;
74933+}
74934+
74935+static void
74936+insert_acl_role_label(struct acl_role_label *role)
74937+{
74938+ int i;
74939+
74940+ if (polstate->role_list == NULL) {
74941+ polstate->role_list = role;
74942+ role->prev = NULL;
74943+ } else {
74944+ role->prev = polstate->role_list;
74945+ polstate->role_list = role;
74946+ }
74947+
74948+ /* used for hash chains */
74949+ role->next = NULL;
74950+
74951+ if (role->roletype & GR_ROLE_DOMAIN) {
74952+ for (i = 0; i < role->domain_child_num; i++)
74953+ __insert_acl_role_label(role, role->domain_children[i]);
74954+ } else
74955+ __insert_acl_role_label(role, role->uidgid);
74956+}
74957+
74958+static int
74959+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
74960+{
74961+ struct name_entry **curr, *nentry;
74962+ struct inodev_entry *ientry;
74963+ unsigned int len = strlen(name);
74964+ unsigned int key = full_name_hash(name, len);
74965+ unsigned int index = key % polstate->name_set.n_size;
74966+
74967+ curr = &polstate->name_set.n_hash[index];
74968+
74969+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74970+ curr = &((*curr)->next);
74971+
74972+ if (*curr != NULL)
74973+ return 1;
74974+
74975+ nentry = acl_alloc(sizeof (struct name_entry));
74976+ if (nentry == NULL)
74977+ return 0;
74978+ ientry = acl_alloc(sizeof (struct inodev_entry));
74979+ if (ientry == NULL)
74980+ return 0;
74981+ ientry->nentry = nentry;
74982+
74983+ nentry->key = key;
74984+ nentry->name = name;
74985+ nentry->inode = inode;
74986+ nentry->device = device;
74987+ nentry->len = len;
74988+ nentry->deleted = deleted;
74989+
74990+ nentry->prev = NULL;
74991+ curr = &polstate->name_set.n_hash[index];
74992+ if (*curr != NULL)
74993+ (*curr)->prev = nentry;
74994+ nentry->next = *curr;
74995+ *curr = nentry;
74996+
74997+ /* insert us into the table searchable by inode/dev */
74998+ __insert_inodev_entry(polstate, ientry);
74999+
75000+ return 1;
75001+}
75002+
75003+/* we allocate chained hash tables, so the optimal size is where the load factor lambda ~ 1 */
75004+
75005+static void *
75006+create_table(__u32 * len, int elementsize)
75007+{
75008+ unsigned int table_sizes[] = {
75009+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75010+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75011+ 4194301, 8388593, 16777213, 33554393, 67108859
75012+ };
75013+ void *newtable = NULL;
75014+ unsigned int pwr = 0;
75015+
75016+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75017+ table_sizes[pwr] <= *len)
75018+ pwr++;
75019+
75020+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75021+ return newtable;
75022+
75023+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75024+ newtable =
75025+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75026+ else
75027+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75028+
75029+ *len = table_sizes[pwr];
75030+
75031+ return newtable;
75032+}
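
create_table() walks a fixed ladder of primes and picks the first one strictly larger than the requested length, keeping chains near load factor 1. Unlike the compact sketch below, it also bails out (returns NULL) when even the largest prime would not do or the allocation would overflow, and it writes the chosen size back through *len:

#include <stdio.h>

/* pick the smallest prime from the ladder strictly greater than want */
static unsigned int pick_size(unsigned int want)
{
	static const unsigned int primes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191
	};
	unsigned int i = 0;

	while (i < sizeof(primes) / sizeof(primes[0]) - 1 && primes[i] <= want)
		i++;
	return primes[i];
}

int main(void)
{
	printf("%u %u %u\n", pick_size(0), pick_size(100), pick_size(1021));
	/* prints: 7 127 2039 */
	return 0;
}
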
75033+
75034+static int
75035+init_variables(const struct gr_arg *arg, bool reload)
75036+{
75037+ struct task_struct *reaper = init_pid_ns.child_reaper;
75038+ unsigned int stacksize;
75039+
75040+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
75041+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75042+ polstate->name_set.n_size = arg->role_db.num_objects;
75043+ polstate->inodev_set.i_size = arg->role_db.num_objects;
75044+
75045+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
75046+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
75047+ return 1;
75048+
75049+ if (!reload) {
75050+ if (!gr_init_uidset())
75051+ return 1;
75052+ }
75053+
75054+ /* set up the stack that holds allocation info */
75055+
75056+ stacksize = arg->role_db.num_pointers + 5;
75057+
75058+ if (!acl_alloc_stack_init(stacksize))
75059+ return 1;
75060+
75061+ if (!reload) {
75062+ /* grab reference for the real root dentry and vfsmount */
75063+ get_fs_root(reaper->fs, &gr_real_root);
75064+
75065+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75066+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
75067+#endif
75068+
75069+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75070+ if (fakefs_obj_rw == NULL)
75071+ return 1;
75072+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75073+
75074+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75075+ if (fakefs_obj_rwx == NULL)
75076+ return 1;
75077+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75078+ }
75079+
75080+ polstate->subj_map_set.s_hash =
75081+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
75082+ polstate->acl_role_set.r_hash =
75083+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
75084+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
75085+ polstate->inodev_set.i_hash =
75086+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
75087+
75088+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
75089+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
75090+ return 1;
75091+
75092+ memset(polstate->subj_map_set.s_hash, 0,
75093+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
75094+ memset(polstate->acl_role_set.r_hash, 0,
75095+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
75096+ memset(polstate->name_set.n_hash, 0,
75097+ sizeof (struct name_entry *) * polstate->name_set.n_size);
75098+ memset(polstate->inodev_set.i_hash, 0,
75099+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
75100+
75101+ return 0;
75102+}
75103+
75104+/* free information not needed after startup;
75105+   currently this is the user->kernel pointer mappings for subjects
75106+*/
75107+
75108+static void
75109+free_init_variables(void)
75110+{
75111+ __u32 i;
75112+
75113+ if (polstate->subj_map_set.s_hash) {
75114+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
75115+ if (polstate->subj_map_set.s_hash[i]) {
75116+ kfree(polstate->subj_map_set.s_hash[i]);
75117+ polstate->subj_map_set.s_hash[i] = NULL;
75118+ }
75119+ }
75120+
75121+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
75122+ PAGE_SIZE)
75123+ kfree(polstate->subj_map_set.s_hash);
75124+ else
75125+ vfree(polstate->subj_map_set.s_hash);
75126+ }
75127+
75128+ return;
75129+}
75130+
75131+static void
75132+free_variables(bool reload)
75133+{
75134+ struct acl_subject_label *s;
75135+ struct acl_role_label *r;
75136+ struct task_struct *task, *task2;
75137+ unsigned int x;
75138+
75139+ if (!reload) {
75140+ gr_clear_learn_entries();
75141+
75142+ read_lock(&tasklist_lock);
75143+ do_each_thread(task2, task) {
75144+ task->acl_sp_role = 0;
75145+ task->acl_role_id = 0;
75146+ task->inherited = 0;
75147+ task->acl = NULL;
75148+ task->role = NULL;
75149+ } while_each_thread(task2, task);
75150+ read_unlock(&tasklist_lock);
75151+
75152+ kfree(fakefs_obj_rw);
75153+ fakefs_obj_rw = NULL;
75154+ kfree(fakefs_obj_rwx);
75155+ fakefs_obj_rwx = NULL;
75156+
75157+ /* release the reference to the real root dentry and vfsmount */
75158+ path_put(&gr_real_root);
75159+ memset(&gr_real_root, 0, sizeof(gr_real_root));
75160+ }
75161+
75162+ /* free all object hash tables */
75163+
75164+ FOR_EACH_ROLE_START(r)
75165+ if (r->subj_hash == NULL)
75166+ goto next_role;
75167+ FOR_EACH_SUBJECT_START(r, s, x)
75168+ if (s->obj_hash == NULL)
75169+ break;
75170+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75171+ kfree(s->obj_hash);
75172+ else
75173+ vfree(s->obj_hash);
75174+ FOR_EACH_SUBJECT_END(s, x)
75175+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75176+ if (s->obj_hash == NULL)
75177+ break;
75178+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75179+ kfree(s->obj_hash);
75180+ else
75181+ vfree(s->obj_hash);
75182+ FOR_EACH_NESTED_SUBJECT_END(s)
75183+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75184+ kfree(r->subj_hash);
75185+ else
75186+ vfree(r->subj_hash);
75187+ r->subj_hash = NULL;
75188+next_role:
75189+ FOR_EACH_ROLE_END(r)
75190+
75191+ acl_free_all();
75192+
75193+ if (polstate->acl_role_set.r_hash) {
75194+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75195+ PAGE_SIZE)
75196+ kfree(polstate->acl_role_set.r_hash);
75197+ else
75198+ vfree(polstate->acl_role_set.r_hash);
75199+ }
75200+ if (polstate->name_set.n_hash) {
75201+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
75202+ PAGE_SIZE)
75203+ kfree(polstate->name_set.n_hash);
75204+ else
75205+ vfree(polstate->name_set.n_hash);
75206+ }
75207+
75208+ if (polstate->inodev_set.i_hash) {
75209+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75210+ PAGE_SIZE)
75211+ kfree(polstate->inodev_set.i_hash);
75212+ else
75213+ vfree(polstate->inodev_set.i_hash);
75214+ }
75215+
75216+ if (!reload)
75217+ gr_free_uidset();
75218+
75219+ memset(&polstate->name_set, 0, sizeof (struct name_db));
75220+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
75221+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
75222+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
75223+
75224+ polstate->default_role = NULL;
75225+ polstate->kernel_role = NULL;
75226+ polstate->role_list = NULL;
75227+
75228+ return;
75229+}
75230+
75231+static struct acl_subject_label *
75232+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
75233+
75234+static int alloc_and_copy_string(char **name, unsigned int maxlen)
75235+{
75236+ unsigned int len = strnlen_user(*name, maxlen);
75237+ char *tmp;
75238+
75239+ if (!len || len >= maxlen)
75240+ return -EINVAL;
75241+
75242+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75243+ return -ENOMEM;
75244+
75245+ if (copy_from_user(tmp, *name, len))
75246+ return -EFAULT;
75247+
75248+ tmp[len-1] = '\0';
75249+ *name = tmp;
75250+
75251+ return 0;
75252+}
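
alloc_and_copy_string() above measures first, rejects empty or overlong strings, copies, then forces a terminator at len-1 regardless (strnlen_user counts the NUL, so a clean string is unchanged and a racing userspace writer cannot smuggle in an unterminated one). A userspace approximation, with strnlen standing in for strnlen_user; the semantics differ slightly since strnlen cannot report faults and the bound check here is approximate:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *import_string(const char *src, size_t maxlen)
{
	size_t len = strnlen(src, maxlen) + 1; /* +1 to count the NUL, like strnlen_user */
	char *tmp;

	if (len == 1 || len > maxlen)          /* empty or too long: reject */
		return NULL;
	tmp = malloc(len);
	if (!tmp)
		return NULL;
	memcpy(tmp, src, len);
	tmp[len - 1] = '\0';                   /* force termination defensively */
	return tmp;
}

int main(void)
{
	char *s = import_string("eth0:1", 16);

	printf("%s\n", s ? s : "(rejected)");
	free(s);
	return 0;
}
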
75253+
75254+static int
75255+copy_user_glob(struct acl_object_label *obj)
75256+{
75257+ struct acl_object_label *g_tmp, **guser;
75258+ int error;
75259+
75260+ if (obj->globbed == NULL)
75261+ return 0;
75262+
75263+ guser = &obj->globbed;
75264+ while (*guser) {
75265+ g_tmp = (struct acl_object_label *)
75266+ acl_alloc(sizeof (struct acl_object_label));
75267+ if (g_tmp == NULL)
75268+ return -ENOMEM;
75269+
75270+ if (copy_acl_object_label(g_tmp, *guser))
75271+ return -EFAULT;
75272+
75273+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
75274+ if (error)
75275+ return error;
75276+
75277+ *guser = g_tmp;
75278+ guser = &(g_tmp->next);
75279+ }
75280+
75281+ return 0;
75282+}
75283+
75284+static int
75285+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75286+ struct acl_role_label *role)
75287+{
75288+ struct acl_object_label *o_tmp;
75289+ int ret;
75290+
75291+ while (userp) {
75292+ if ((o_tmp = (struct acl_object_label *)
75293+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75294+ return -ENOMEM;
75295+
75296+ if (copy_acl_object_label(o_tmp, userp))
75297+ return -EFAULT;
75298+
75299+ userp = o_tmp->prev;
75300+
75301+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
75302+ if (ret)
75303+ return ret;
75304+
75305+ insert_acl_obj_label(o_tmp, subj);
75306+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75307+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75308+ return -ENOMEM;
75309+
75310+ ret = copy_user_glob(o_tmp);
75311+ if (ret)
75312+ return ret;
75313+
75314+ if (o_tmp->nested) {
75315+ int already_copied;
75316+
75317+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
75318+ if (IS_ERR(o_tmp->nested))
75319+ return PTR_ERR(o_tmp->nested);
75320+
75321+ /* insert into nested subject list if we haven't copied this one yet
75322+ to prevent duplicate entries */
75323+ if (!already_copied) {
75324+ o_tmp->nested->next = role->hash->first;
75325+ role->hash->first = o_tmp->nested;
75326+ }
75327+ }
75328+ }
75329+
75330+ return 0;
75331+}
75332+
75333+static __u32
75334+count_user_subjs(struct acl_subject_label *userp)
75335+{
75336+ struct acl_subject_label s_tmp;
75337+ __u32 num = 0;
75338+
75339+ while (userp) {
75340+ if (copy_acl_subject_label(&s_tmp, userp))
75341+ break;
75342+
75343+		userp = s_tmp.prev; num++;
75344+ }
75345+
75346+ return num;
75347+}
75348+
75349+static int
75350+copy_user_allowedips(struct acl_role_label *rolep)
75351+{
75352+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
75353+
75354+ ruserip = rolep->allowed_ips;
75355+
75356+ while (ruserip) {
75357+ rlast = rtmp;
75358+
75359+ if ((rtmp = (struct role_allowed_ip *)
75360+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
75361+ return -ENOMEM;
75362+
75363+ if (copy_role_allowed_ip(rtmp, ruserip))
75364+ return -EFAULT;
75365+
75366+ ruserip = rtmp->prev;
75367+
75368+ if (!rlast) {
75369+ rtmp->prev = NULL;
75370+ rolep->allowed_ips = rtmp;
75371+ } else {
75372+ rlast->next = rtmp;
75373+ rtmp->prev = rlast;
75374+ }
75375+
75376+ if (!ruserip)
75377+ rtmp->next = NULL;
75378+ }
75379+
75380+ return 0;
75381+}
75382+
75383+static int
75384+copy_user_transitions(struct acl_role_label *rolep)
75385+{
75386+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
75387+ int error;
75388+
75389+ rusertp = rolep->transitions;
75390+
75391+ while (rusertp) {
75392+ rlast = rtmp;
75393+
75394+ if ((rtmp = (struct role_transition *)
75395+ acl_alloc(sizeof (struct role_transition))) == NULL)
75396+ return -ENOMEM;
75397+
75398+ if (copy_role_transition(rtmp, rusertp))
75399+ return -EFAULT;
75400+
75401+ rusertp = rtmp->prev;
75402+
75403+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
75404+ if (error)
75405+ return error;
75406+
75407+ if (!rlast) {
75408+ rtmp->prev = NULL;
75409+ rolep->transitions = rtmp;
75410+ } else {
75411+ rlast->next = rtmp;
75412+ rtmp->prev = rlast;
75413+ }
75414+
75415+ if (!rusertp)
75416+ rtmp->next = NULL;
75417+ }
75418+
75419+ return 0;
75420+}
75421+
75422+static __u32 count_user_objs(const struct acl_object_label __user *userp)
75423+{
75424+ struct acl_object_label o_tmp;
75425+ __u32 num = 0;
75426+
75427+ while (userp) {
75428+ if (copy_acl_object_label(&o_tmp, userp))
75429+ break;
75430+
75431+ userp = o_tmp.prev;
75432+ num++;
75433+ }
75434+
75435+ return num;
75436+}
75437+
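
count_user_objs() (and its subject-counting twin above it) shows the rule for walking structures that live in untrusted userspace memory: copy each node into a kernel-side buffer first, then follow the prev link out of the copy, never dereferencing the user pointer chain directly. The same pattern as a self-contained userspace toy, where memcpy stands in for copy_from_user:

#include <stdio.h>
#include <string.h>

struct node { struct node *prev; int payload; };

/* stand-in for copy_from_user(); a fault would return nonzero */
static int copy_untrusted(struct node *dst, const struct node *src)
{
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

static unsigned int count_nodes(const struct node *userp)
{
	struct node tmp;
	unsigned int num = 0;

	while (userp) {
		if (copy_untrusted(&tmp, userp))
			break;
		userp = tmp.prev; /* follow the link out of our copy */
		num++;
	}
	return num;
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { &a, 2 }, c = { &b, 3 };

	printf("%u\n", count_nodes(&c)); /* 3 */
	return 0;
}
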
75438+static struct acl_subject_label *
75439+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
75440+{
75441+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
75442+ __u32 num_objs;
75443+ struct acl_ip_label **i_tmp, *i_utmp2;
75444+ struct gr_hash_struct ghash;
75445+ struct subject_map *subjmap;
75446+ unsigned int i_num;
75447+ int err;
75448+
75449+ if (already_copied != NULL)
75450+ *already_copied = 0;
75451+
75452+ s_tmp = lookup_subject_map(userp);
75453+
75454+	/* we've already copied this subject into the kernel; just return
75455+	   the reference to it rather than copying it over again
75456+	*/
75457+ if (s_tmp) {
75458+ if (already_copied != NULL)
75459+ *already_copied = 1;
75460+ return(s_tmp);
75461+ }
75462+
75463+ if ((s_tmp = (struct acl_subject_label *)
75464+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
75465+ return ERR_PTR(-ENOMEM);
75466+
75467+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
75468+ if (subjmap == NULL)
75469+ return ERR_PTR(-ENOMEM);
75470+
75471+ subjmap->user = userp;
75472+ subjmap->kernel = s_tmp;
75473+ insert_subj_map_entry(subjmap);
75474+
75475+ if (copy_acl_subject_label(s_tmp, userp))
75476+ return ERR_PTR(-EFAULT);
75477+
75478+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
75479+ if (err)
75480+ return ERR_PTR(err);
75481+
75482+ if (!strcmp(s_tmp->filename, "/"))
75483+ role->root_label = s_tmp;
75484+
75485+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
75486+ return ERR_PTR(-EFAULT);
75487+
75488+ /* copy user and group transition tables */
75489+
75490+ if (s_tmp->user_trans_num) {
75491+ uid_t *uidlist;
75492+
75493+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
75494+ if (uidlist == NULL)
75495+ return ERR_PTR(-ENOMEM);
75496+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
75497+ return ERR_PTR(-EFAULT);
75498+
75499+ s_tmp->user_transitions = uidlist;
75500+ }
75501+
75502+ if (s_tmp->group_trans_num) {
75503+ gid_t *gidlist;
75504+
75505+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
75506+ if (gidlist == NULL)
75507+ return ERR_PTR(-ENOMEM);
75508+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
75509+ return ERR_PTR(-EFAULT);
75510+
75511+ s_tmp->group_transitions = gidlist;
75512+ }
75513+
75514+ /* set up object hash table */
75515+ num_objs = count_user_objs(ghash.first);
75516+
75517+ s_tmp->obj_hash_size = num_objs;
75518+ s_tmp->obj_hash =
75519+ (struct acl_object_label **)
75520+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
75521+
75522+ if (!s_tmp->obj_hash)
75523+ return ERR_PTR(-ENOMEM);
75524+
75525+ memset(s_tmp->obj_hash, 0,
75526+ s_tmp->obj_hash_size *
75527+ sizeof (struct acl_object_label *));
75528+
75529+ /* add in objects */
75530+ err = copy_user_objs(ghash.first, s_tmp, role);
75531+
75532+ if (err)
75533+ return ERR_PTR(err);
75534+
75535+ /* set pointer for parent subject */
75536+ if (s_tmp->parent_subject) {
75537+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
75538+
75539+ if (IS_ERR(s_tmp2))
75540+ return s_tmp2;
75541+
75542+ s_tmp->parent_subject = s_tmp2;
75543+ }
75544+
75545+ /* add in ip acls */
75546+
75547+ if (!s_tmp->ip_num) {
75548+ s_tmp->ips = NULL;
75549+ goto insert;
75550+ }
75551+
75552+ i_tmp =
75553+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
75554+ sizeof (struct acl_ip_label *));
75555+
75556+ if (!i_tmp)
75557+ return ERR_PTR(-ENOMEM);
75558+
75559+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
75560+ *(i_tmp + i_num) =
75561+ (struct acl_ip_label *)
75562+ acl_alloc(sizeof (struct acl_ip_label));
75563+ if (!*(i_tmp + i_num))
75564+ return ERR_PTR(-ENOMEM);
75565+
75566+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
75567+ return ERR_PTR(-EFAULT);
75568+
75569+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
75570+ return ERR_PTR(-EFAULT);
75571+
75572+ if ((*(i_tmp + i_num))->iface == NULL)
75573+ continue;
75574+
75575+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
75576+ if (err)
75577+ return ERR_PTR(err);
75578+ }
75579+
75580+ s_tmp->ips = i_tmp;
75581+
75582+insert:
75583+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
75584+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
75585+ return ERR_PTR(-ENOMEM);
75586+
75587+ return s_tmp;
75588+}
75589+
75590+static int
75591+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
75592+{
75593+ struct acl_subject_label s_pre;
75594+ struct acl_subject_label * ret;
75595+ int err;
75596+
75597+ while (userp) {
75598+ if (copy_acl_subject_label(&s_pre, userp))
75599+ return -EFAULT;
75600+
75601+ ret = do_copy_user_subj(userp, role, NULL);
75602+
75603+ err = PTR_ERR(ret);
75604+ if (IS_ERR(ret))
75605+ return err;
75606+
75607+ insert_acl_subj_label(ret, role);
75608+
75609+ userp = s_pre.prev;
75610+ }
75611+
75612+ return 0;
75613+}
75614+
75615+static int
75616+copy_user_acl(struct gr_arg *arg)
75617+{
75618+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
75619+ struct acl_subject_label *subj_list;
75620+ struct sprole_pw *sptmp;
75621+ struct gr_hash_struct *ghash;
75622+ uid_t *domainlist;
75623+ unsigned int r_num;
75624+ int err = 0;
75625+ __u16 i;
75626+ __u32 num_subjs;
75627+
75628+ /* we need a default and kernel role */
75629+ if (arg->role_db.num_roles < 2)
75630+ return -EINVAL;
75631+
75632+ /* copy special role authentication info from userspace */
75633+
75634+ polstate->num_sprole_pws = arg->num_sprole_pws;
75635+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
75636+
75637+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
75638+ return -ENOMEM;
75639+
75640+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75641+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
75642+ if (!sptmp)
75643+ return -ENOMEM;
75644+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
75645+ return -EFAULT;
75646+
75647+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
75648+ if (err)
75649+ return err;
75650+
75651+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75652+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
75653+#endif
75654+
75655+ polstate->acl_special_roles[i] = sptmp;
75656+ }
75657+
75658+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
75659+
75660+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
75661+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
75662+
75663+ if (!r_tmp)
75664+ return -ENOMEM;
75665+
75666+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
75667+ return -EFAULT;
75668+
75669+ if (copy_acl_role_label(r_tmp, r_utmp2))
75670+ return -EFAULT;
75671+
75672+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
75673+ if (err)
75674+ return err;
75675+
75676+ if (!strcmp(r_tmp->rolename, "default")
75677+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
75678+ polstate->default_role = r_tmp;
75679+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
75680+ polstate->kernel_role = r_tmp;
75681+ }
75682+
75683+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
75684+ return -ENOMEM;
75685+
75686+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
75687+ return -EFAULT;
75688+
75689+ r_tmp->hash = ghash;
75690+
75691+ num_subjs = count_user_subjs(r_tmp->hash->first);
75692+
75693+ r_tmp->subj_hash_size = num_subjs;
75694+ r_tmp->subj_hash =
75695+ (struct acl_subject_label **)
75696+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
75697+
75698+ if (!r_tmp->subj_hash)
75699+ return -ENOMEM;
75700+
75701+ err = copy_user_allowedips(r_tmp);
75702+ if (err)
75703+ return err;
75704+
75705+ /* copy domain info */
75706+ if (r_tmp->domain_children != NULL) {
75707+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
75708+ if (domainlist == NULL)
75709+ return -ENOMEM;
75710+
75711+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
75712+ return -EFAULT;
75713+
75714+ r_tmp->domain_children = domainlist;
75715+ }
75716+
75717+ err = copy_user_transitions(r_tmp);
75718+ if (err)
75719+ return err;
75720+
75721+ memset(r_tmp->subj_hash, 0,
75722+ r_tmp->subj_hash_size *
75723+ sizeof (struct acl_subject_label *));
75724+
75725+ /* acquire the list of subjects, then NULL out
75726+ the list prior to parsing the subjects for this role,
75727+ as during this parsing the list is replaced with a list
75728+ of *nested* subjects for the role
75729+ */
75730+ subj_list = r_tmp->hash->first;
75731+
75732+ /* set nested subject list to null */
75733+ r_tmp->hash->first = NULL;
75734+
75735+ err = copy_user_subjs(subj_list, r_tmp);
75736+
75737+ if (err)
75738+ return err;
75739+
75740+ insert_acl_role_label(r_tmp);
75741+ }
75742+
75743+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
75744+ return -EINVAL;
75745+
75746+ return err;
75747+}
75748+
75749+static int gracl_reload_apply_policies(void *reload)
75750+{
75751+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
75752+ struct task_struct *task, *task2;
75753+ struct acl_role_label *role, *rtmp;
75754+ struct acl_subject_label *subj;
75755+ const struct cred *cred;
75756+ int role_applied;
75757+ int ret = 0;
75758+
75759+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
75760+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
75761+
75762+ /* first make sure we'll be able to apply the new policy cleanly */
75763+ do_each_thread(task2, task) {
75764+ if (task->exec_file == NULL)
75765+ continue;
75766+ role_applied = 0;
75767+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75768+ /* preserve special roles */
75769+ FOR_EACH_ROLE_START(role)
75770+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75771+ rtmp = task->role;
75772+ task->role = role;
75773+ role_applied = 1;
75774+ break;
75775+ }
75776+ FOR_EACH_ROLE_END(role)
75777+ }
75778+ if (!role_applied) {
75779+ cred = __task_cred(task);
75780+ rtmp = task->role;
75781+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75782+ }
75783+		/* this handles non-nested inherited subjects; nested subjects will still
75784+		   be dropped currently */
75785+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
75786+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
75787+ /* change the role back so that we've made no modifications to the policy */
75788+ task->role = rtmp;
75789+
75790+ if (subj == NULL || task->tmpacl == NULL) {
75791+ ret = -EINVAL;
75792+ goto out;
75793+ }
75794+ } while_each_thread(task2, task);
75795+
75796+ /* now actually apply the policy */
75797+
75798+ do_each_thread(task2, task) {
75799+ if (task->exec_file) {
75800+ role_applied = 0;
75801+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75802+ /* preserve special roles */
75803+ FOR_EACH_ROLE_START(role)
75804+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75805+ task->role = role;
75806+ role_applied = 1;
75807+ break;
75808+ }
75809+ FOR_EACH_ROLE_END(role)
75810+ }
75811+ if (!role_applied) {
75812+ cred = __task_cred(task);
75813+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75814+ }
75815+			/* this handles non-nested inherited subjects; nested subjects will still
75816+			   be dropped currently */
75817+ if (!reload_state->oldmode && task->inherited)
75818+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
75819+ else {
75820+ /* looked up and tagged to the task previously */
75821+ subj = task->tmpacl;
75822+ }
75823+ /* subj will be non-null */
75824+ __gr_apply_subject_to_task(polstate, task, subj);
75825+ if (reload_state->oldmode) {
75826+ task->acl_role_id = 0;
75827+ task->acl_sp_role = 0;
75828+ task->inherited = 0;
75829+ }
75830+ } else {
75831+ // it's a kernel process
75832+ task->role = polstate->kernel_role;
75833+ task->acl = polstate->kernel_role->root_label;
75834+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75835+ task->acl->mode &= ~GR_PROCFIND;
75836+#endif
75837+ }
75838+ } while_each_thread(task2, task);
75839+
75840+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
75841+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
75842+
75843+out:
75844+
75845+ return ret;
75846+}
75847+
75848+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
75849+{
75850+ struct gr_reload_state new_reload_state = { };
75851+ int err;
75852+
75853+ new_reload_state.oldpolicy_ptr = polstate;
75854+ new_reload_state.oldalloc_ptr = current_alloc_state;
75855+ new_reload_state.oldmode = oldmode;
75856+
75857+ current_alloc_state = &new_reload_state.newalloc;
75858+ polstate = &new_reload_state.newpolicy;
75859+
75860+ /* everything relevant is now saved off, copy in the new policy */
75861+ if (init_variables(args, true)) {
75862+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75863+ err = -ENOMEM;
75864+ goto error;
75865+ }
75866+
75867+ err = copy_user_acl(args);
75868+ free_init_variables();
75869+ if (err)
75870+ goto error;
75871+	/* the new policy is copied in, with the old policy available via saved_state.
75872+	   First go through applying roles, making sure to preserve special roles;
75873+	   then apply new subjects, making sure to preserve inherited and nested subjects,
75874+	   though currently only inherited subjects will be preserved
75875+	*/
75876+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
75877+ if (err)
75878+ goto error;
75879+
75880+ /* we've now applied the new policy, so restore the old policy state to free it */
75881+ polstate = &new_reload_state.oldpolicy;
75882+ current_alloc_state = &new_reload_state.oldalloc;
75883+ free_variables(true);
75884+
75885+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
75886+ to running_polstate/current_alloc_state inside stop_machine
75887+ */
75888+ err = 0;
75889+ goto out;
75890+error:
75891+	/* if loading the new policy fails, we just keep the previous
75892+	   policy around
75893+	*/
75894+ free_variables(true);
75895+
75896+ /* doesn't affect runtime, but maintains consistent state */
75897+out:
75898+ polstate = new_reload_state.oldpolicy_ptr;
75899+ current_alloc_state = new_reload_state.oldalloc_ptr;
75900+
75901+ return err;
75902+}
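
The reload path above is a two-phase commit: the new policy is built and validated entirely off to the side while polstate and current_alloc_state temporarily point at scratch state, and only once everything checks out does stop_machine() publish it; any failure leaves the running policy untouched. The shape of that idea in a few lines of plain C, where struct policy and the nroles < 2 check are invented stand-ins and a pointer swap plays the role of stop_machine():

#include <stdio.h>
#include <stdlib.h>

struct policy { int nroles; };

static struct policy *live;

static int reload(int nroles)
{
	struct policy *fresh, *old;

	fresh = malloc(sizeof(*fresh));
	if (!fresh)
		return -1;
	fresh->nroles = nroles;
	if (fresh->nroles < 2) {  /* validate before touching 'live' */
		free(fresh);
		return -1;
	}
	old = live;               /* publish, then free the old state */
	live = fresh;
	free(old);
	return 0;
}

int main(void)
{
	reload(3);
	printf("reload(1) -> %d, live keeps %d roles\n", reload(1), live->nroles);
	return 0;
}
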
75903+
75904+static int
75905+gracl_init(struct gr_arg *args)
75906+{
75907+ int error = 0;
75908+
75909+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
75910+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
75911+
75912+ if (init_variables(args, false)) {
75913+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75914+ error = -ENOMEM;
75915+ goto out;
75916+ }
75917+
75918+ error = copy_user_acl(args);
75919+ free_init_variables();
75920+ if (error)
75921+ goto out;
75922+
75923+ error = gr_set_acls(0);
75924+ if (error)
75925+ goto out;
75926+
75927+ gr_enable_rbac_system();
75928+
75929+ return 0;
75930+
75931+out:
75932+ free_variables(false);
75933+ return error;
75934+}
75935+
75936+static int
75937+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
75938+ unsigned char **sum)
75939+{
75940+ struct acl_role_label *r;
75941+ struct role_allowed_ip *ipp;
75942+ struct role_transition *trans;
75943+ unsigned int i;
75944+ int found = 0;
75945+ u32 curr_ip = current->signal->curr_ip;
75946+
75947+ current->signal->saved_ip = curr_ip;
75948+
75949+ /* check transition table */
75950+
75951+ for (trans = current->role->transitions; trans; trans = trans->next) {
75952+ if (!strcmp(rolename, trans->rolename)) {
75953+ found = 1;
75954+ break;
75955+ }
75956+ }
75957+
75958+ if (!found)
75959+ return 0;
75960+
75961+ /* handle special roles that do not require authentication
75962+ and check ip */
75963+
75964+ FOR_EACH_ROLE_START(r)
75965+ if (!strcmp(rolename, r->rolename) &&
75966+ (r->roletype & GR_ROLE_SPECIAL)) {
75967+ found = 0;
75968+ if (r->allowed_ips != NULL) {
75969+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75970+ if ((ntohl(curr_ip) & ipp->netmask) ==
75971+ (ntohl(ipp->addr) & ipp->netmask))
75972+ found = 1;
75973+ }
75974+ } else
75975+ found = 2;
75976+ if (!found)
75977+ return 0;
75978+
75979+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75980+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75981+ *salt = NULL;
75982+ *sum = NULL;
75983+ return 1;
75984+ }
75985+ }
75986+ FOR_EACH_ROLE_END(r)
75987+
75988+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75989+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75990+ *salt = polstate->acl_special_roles[i]->salt;
75991+ *sum = polstate->acl_special_roles[i]->sum;
75992+ return 1;
75993+ }
75994+ }
75995+
75996+ return 0;
75997+}
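+/* In the allowed-IP scan above, found is a tri-state: 0 means the role has
+ * an allowed_ips list but the caller's IP matched no entry (deny), 1 means
+ * an entry matched, and 2 means the role carries no IP restriction at all.
+ * Only the non-zero cases can reach the NOPW/PAM shortcut that
+ * authenticates without a password (salt and sum returned as NULL). */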
75998+
75999+int gr_check_secure_terminal(struct task_struct *task)
76000+{
76001+ struct task_struct *p, *p2, *p3;
76002+ struct files_struct *files;
76003+ struct fdtable *fdt;
76004+ struct file *our_file = NULL, *file;
76005+ int i;
76006+
76007+ if (task->signal->tty == NULL)
76008+ return 1;
76009+
76010+ files = get_files_struct(task);
76011+ if (files != NULL) {
76012+ rcu_read_lock();
76013+ fdt = files_fdtable(files);
76014+ for (i=0; i < fdt->max_fds; i++) {
76015+ file = fcheck_files(files, i);
76016+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
76017+ get_file(file);
76018+ our_file = file;
76019+ }
76020+ }
76021+ rcu_read_unlock();
76022+ put_files_struct(files);
76023+ }
76024+
76025+ if (our_file == NULL)
76026+ return 1;
76027+
76028+ read_lock(&tasklist_lock);
76029+ do_each_thread(p2, p) {
76030+ files = get_files_struct(p);
76031+ if (files == NULL ||
76032+ (p->signal && p->signal->tty == task->signal->tty)) {
76033+ if (files != NULL)
76034+ put_files_struct(files);
76035+ continue;
76036+ }
76037+ rcu_read_lock();
76038+ fdt = files_fdtable(files);
76039+ for (i=0; i < fdt->max_fds; i++) {
76040+ file = fcheck_files(files, i);
76041+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
76042+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
76043+ p3 = task;
76044+ while (task_pid_nr(p3) > 0) {
76045+ if (p3 == p)
76046+ break;
76047+ p3 = p3->real_parent;
76048+ }
76049+ if (p3 == p)
76050+ break;
76051+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
76052+ gr_handle_alertkill(p);
76053+ rcu_read_unlock();
76054+ put_files_struct(files);
76055+ read_unlock(&tasklist_lock);
76056+ fput(our_file);
76057+ return 0;
76058+ }
76059+ }
76060+ rcu_read_unlock();
76061+ put_files_struct(files);
76062+ } while_each_thread(p2, p);
76063+ read_unlock(&tasklist_lock);
76064+
76065+ fput(our_file);
76066+ return 1;
76067+}
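+/* gr_check_secure_terminal() returns 1 only if no unrelated process holds
+ * an open descriptor on the caller's tty: it grabs a reference to our own
+ * tty file, then walks every other task's fd table looking for a character
+ * device with the same rdev. Ancestors of the caller are excused via the
+ * real_parent walk (p3); any other holder is logged as a tty sniffer and
+ * the secure-terminal check fails. */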
76068+
76069+ssize_t
76070+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
76071+{
76072+ struct gr_arg_wrapper uwrap;
76073+ unsigned char *sprole_salt = NULL;
76074+ unsigned char *sprole_sum = NULL;
76075+ int error = 0;
76076+ int error2 = 0;
76077+ size_t req_count = 0;
76078+ unsigned char oldmode = 0;
76079+
76080+ mutex_lock(&gr_dev_mutex);
76081+
76082+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
76083+ error = -EPERM;
76084+ goto out;
76085+ }
76086+
76087+#ifdef CONFIG_COMPAT
76088+ pax_open_kernel();
76089+ if (is_compat_task()) {
76090+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
76091+ copy_gr_arg = &copy_gr_arg_compat;
76092+ copy_acl_object_label = &copy_acl_object_label_compat;
76093+ copy_acl_subject_label = &copy_acl_subject_label_compat;
76094+ copy_acl_role_label = &copy_acl_role_label_compat;
76095+ copy_acl_ip_label = &copy_acl_ip_label_compat;
76096+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
76097+ copy_role_transition = &copy_role_transition_compat;
76098+ copy_sprole_pw = &copy_sprole_pw_compat;
76099+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
76100+ copy_pointer_from_array = &copy_pointer_from_array_compat;
76101+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
76102+ } else {
76103+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
76104+ copy_gr_arg = &copy_gr_arg_normal;
76105+ copy_acl_object_label = &copy_acl_object_label_normal;
76106+ copy_acl_subject_label = &copy_acl_subject_label_normal;
76107+ copy_acl_role_label = &copy_acl_role_label_normal;
76108+ copy_acl_ip_label = &copy_acl_ip_label_normal;
76109+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
76110+ copy_role_transition = &copy_role_transition_normal;
76111+ copy_sprole_pw = &copy_sprole_pw_normal;
76112+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
76113+ copy_pointer_from_array = &copy_pointer_from_array_normal;
76114+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
76115+ }
76116+ pax_close_kernel();
76117+#endif
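+/* The copy_* helpers are function pointers kept in storage that PaX makes
+ * read-only (presumably via KERNEXEC/constify), hence the
+ * pax_open_kernel()/pax_close_kernel() bracket around retargeting them at
+ * the compat or native implementations for each caller. */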
76118+
76119+ req_count = get_gr_arg_wrapper_size();
76120+
76121+ if (count != req_count) {
76122+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
76123+ error = -EINVAL;
76124+ goto out;
76125+ }
76126+
76128+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
76129+ gr_auth_expires = 0;
76130+ gr_auth_attempts = 0;
76131+ }
76132+
76133+ error = copy_gr_arg_wrapper(buf, &uwrap);
76134+ if (error)
76135+ goto out;
76136+
76137+ error = copy_gr_arg(uwrap.arg, gr_usermode);
76138+ if (error)
76139+ goto out;
76140+
76141+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76142+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76143+ time_after(gr_auth_expires, get_seconds())) {
76144+ error = -EBUSY;
76145+ goto out;
76146+ }
76147+
76148+	/* if a non-root user is trying to do anything other than use a special
76149+	   role, do not attempt authentication and do not count the attempt
76150+	   towards the authentication lockout
76151+	*/
76152+
76153+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
76154+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76155+ gr_is_global_nonroot(current_uid())) {
76156+ error = -EPERM;
76157+ goto out;
76158+ }
76159+
76160+	/* ensure the pw and special role name are null-terminated */
76161+
76162+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
76163+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
76164+
76165+	/* Okay.
76166+	 * We now have enough of the argument structure (we have yet
76167+	 * to copy_from_user the tables themselves). Copy the tables
76168+	 * only if we need them, i.e. for loading operations. */
76169+
76170+ switch (gr_usermode->mode) {
76171+ case GR_STATUS:
76172+ if (gr_acl_is_enabled()) {
76173+ error = 1;
76174+ if (!gr_check_secure_terminal(current))
76175+ error = 3;
76176+ } else
76177+ error = 2;
76178+ goto out;
76179+ case GR_SHUTDOWN:
76180+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76181+ stop_machine(gr_rbac_disable, NULL, NULL);
76182+ free_variables(false);
76183+ memset(gr_usermode, 0, sizeof(struct gr_arg));
76184+ memset(gr_system_salt, 0, GR_SALT_LEN);
76185+ memset(gr_system_sum, 0, GR_SHA_LEN);
76186+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
76187+ } else if (gr_acl_is_enabled()) {
76188+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
76189+ error = -EPERM;
76190+ } else {
76191+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
76192+ error = -EAGAIN;
76193+ }
76194+ break;
76195+ case GR_ENABLE:
76196+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
76197+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
76198+ else {
76199+ if (gr_acl_is_enabled())
76200+ error = -EAGAIN;
76201+ else
76202+ error = error2;
76203+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
76204+ }
76205+ break;
76206+ case GR_OLDRELOAD:
76207+		oldmode = 1;
76208+		/* fall through to the shared GR_RELOAD handling */
76208+ case GR_RELOAD:
76209+ if (!gr_acl_is_enabled()) {
76210+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
76211+ error = -EAGAIN;
76212+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76213+ error2 = gracl_reload(gr_usermode, oldmode);
76214+ if (!error2)
76215+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
76216+ else {
76217+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76218+ error = error2;
76219+ }
76220+ } else {
76221+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76222+ error = -EPERM;
76223+ }
76224+ break;
76225+ case GR_SEGVMOD:
76226+ if (unlikely(!gr_acl_is_enabled())) {
76227+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
76228+ error = -EAGAIN;
76229+ break;
76230+ }
76231+
76232+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76233+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
76234+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
76235+ struct acl_subject_label *segvacl;
76236+ segvacl =
76237+ lookup_acl_subj_label(gr_usermode->segv_inode,
76238+ gr_usermode->segv_device,
76239+ current->role);
76240+ if (segvacl) {
76241+ segvacl->crashes = 0;
76242+ segvacl->expires = 0;
76243+ }
76244+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
76245+ gr_remove_uid(gr_usermode->segv_uid);
76246+ }
76247+ } else {
76248+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
76249+ error = -EPERM;
76250+ }
76251+ break;
76252+ case GR_SPROLE:
76253+ case GR_SPROLEPAM:
76254+ if (unlikely(!gr_acl_is_enabled())) {
76255+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
76256+ error = -EAGAIN;
76257+ break;
76258+ }
76259+
76260+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
76261+ current->role->expires = 0;
76262+ current->role->auth_attempts = 0;
76263+ }
76264+
76265+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76266+ time_after(current->role->expires, get_seconds())) {
76267+ error = -EBUSY;
76268+ goto out;
76269+ }
76270+
76271+ if (lookup_special_role_auth
76272+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
76273+ && ((!sprole_salt && !sprole_sum)
76274+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
76275+ char *p = "";
76276+ assign_special_role(gr_usermode->sp_role);
76277+ read_lock(&tasklist_lock);
76278+ if (current->real_parent)
76279+ p = current->real_parent->role->rolename;
76280+ read_unlock(&tasklist_lock);
76281+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
76282+ p, acl_sp_role_value);
76283+ } else {
76284+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
76285+ error = -EPERM;
76286+ if(!(current->role->auth_attempts++))
76287+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76288+
76289+ goto out;
76290+ }
76291+ break;
76292+ case GR_UNSPROLE:
76293+ if (unlikely(!gr_acl_is_enabled())) {
76294+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
76295+ error = -EAGAIN;
76296+ break;
76297+ }
76298+
76299+ if (current->role->roletype & GR_ROLE_SPECIAL) {
76300+ char *p = "";
76301+ int i = 0;
76302+
76303+ read_lock(&tasklist_lock);
76304+ if (current->real_parent) {
76305+ p = current->real_parent->role->rolename;
76306+ i = current->real_parent->acl_role_id;
76307+ }
76308+ read_unlock(&tasklist_lock);
76309+
76310+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
76311+ gr_set_acls(1);
76312+ } else {
76313+ error = -EPERM;
76314+ goto out;
76315+ }
76316+ break;
76317+ default:
76318+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
76319+ error = -EINVAL;
76320+ break;
76321+ }
76322+
76323+ if (error != -EPERM)
76324+ goto out;
76325+
76326+ if(!(gr_auth_attempts++))
76327+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76328+
76329+ out:
76330+ mutex_unlock(&gr_dev_mutex);
76331+
76332+ if (!error)
76333+ error = req_count;
76334+
76335+ return error;
76336+}
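+/* Return convention: as a write() handler this must return the number of
+ * bytes consumed, so a fully successful request returns req_count. Every
+ * -EPERM result increments gr_auth_attempts, the first failure arming the
+ * CONFIG_GRKERNSEC_ACL_TIMEOUT window; once attempts reach
+ * CONFIG_GRKERNSEC_ACL_MAXTRIES, further non-special-role requests inside
+ * the window fail early with -EBUSY. */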
76337+
76338+int
76339+gr_set_acls(const int type)
76340+{
76341+ struct task_struct *task, *task2;
76342+ struct acl_role_label *role = current->role;
76343+ struct acl_subject_label *subj;
76344+ __u16 acl_role_id = current->acl_role_id;
76345+ const struct cred *cred;
76346+ int ret;
76347+
76348+ rcu_read_lock();
76349+ read_lock(&tasklist_lock);
76350+ read_lock(&grsec_exec_file_lock);
76351+ do_each_thread(task2, task) {
76352+		/* check to see if we're called from the exit handler;
76353+		   if so, only replace ACLs that have inherited the admin
76354+		   ACL */
76355+
76356+ if (type && (task->role != role ||
76357+ task->acl_role_id != acl_role_id))
76358+ continue;
76359+
76360+ task->acl_role_id = 0;
76361+ task->acl_sp_role = 0;
76362+ task->inherited = 0;
76363+
76364+ if (task->exec_file) {
76365+ cred = __task_cred(task);
76366+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76367+ subj = __gr_get_subject_for_task(polstate, task, NULL);
76368+ if (subj == NULL) {
76369+ ret = -EINVAL;
76370+ read_unlock(&grsec_exec_file_lock);
76371+ read_unlock(&tasklist_lock);
76372+ rcu_read_unlock();
76373+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
76374+ return ret;
76375+ }
76376+ __gr_apply_subject_to_task(polstate, task, subj);
76377+ } else {
76378+ // it's a kernel process
76379+ task->role = polstate->kernel_role;
76380+ task->acl = polstate->kernel_role->root_label;
76381+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76382+ task->acl->mode &= ~GR_PROCFIND;
76383+#endif
76384+ }
76385+ } while_each_thread(task2, task);
76386+ read_unlock(&grsec_exec_file_lock);
76387+ read_unlock(&tasklist_lock);
76388+ rcu_read_unlock();
76389+
76390+ return 0;
76391+}
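+/* gr_set_acls(0) relabels every task when a policy is first loaded;
+ * gr_set_acls(1) is the special-role exit path and only touches tasks that
+ * still carry the caller's role and acl_role_id. If a task's binary no
+ * longer matches any subject, the whole operation aborts with -EINVAL and
+ * logs GR_DEFACL_MSG. */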
76392diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
76393new file mode 100644
76394index 0000000..39645c9
76395--- /dev/null
76396+++ b/grsecurity/gracl_res.c
76397@@ -0,0 +1,68 @@
76398+#include <linux/kernel.h>
76399+#include <linux/sched.h>
76400+#include <linux/gracl.h>
76401+#include <linux/grinternal.h>
76402+
76403+static const char *restab_log[] = {
76404+ [RLIMIT_CPU] = "RLIMIT_CPU",
76405+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
76406+ [RLIMIT_DATA] = "RLIMIT_DATA",
76407+ [RLIMIT_STACK] = "RLIMIT_STACK",
76408+ [RLIMIT_CORE] = "RLIMIT_CORE",
76409+ [RLIMIT_RSS] = "RLIMIT_RSS",
76410+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
76411+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
76412+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
76413+ [RLIMIT_AS] = "RLIMIT_AS",
76414+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
76415+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
76416+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
76417+ [RLIMIT_NICE] = "RLIMIT_NICE",
76418+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
76419+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
76420+ [GR_CRASH_RES] = "RLIMIT_CRASH"
76421+};
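+/* restab_log[] is indexed directly by the RLIMIT_* constants, so any
+ * rlimit without an initializer above is a NULL hole that
+ * gr_log_resource() below skips as unsupported. GR_CRASH_RES is
+ * grsecurity's own pseudo-rlimit appended past the standard ones. */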
76422+
76423+void
76424+gr_log_resource(const struct task_struct *task,
76425+ const int res, const unsigned long wanted, const int gt)
76426+{
76427+ const struct cred *cred;
76428+ unsigned long rlim;
76429+
76430+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
76431+ return;
76432+
76433+	// skip resources we do not yet support
76434+ if (unlikely(!restab_log[res]))
76435+ return;
76436+
76437+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
76438+ rlim = task_rlimit_max(task, res);
76439+ else
76440+ rlim = task_rlimit(task, res);
76441+
76442+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
76443+ return;
76444+
76445+ rcu_read_lock();
76446+ cred = __task_cred(task);
76447+
76448+ if (res == RLIMIT_NPROC &&
76449+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
76450+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
76451+ goto out_rcu_unlock;
76452+ else if (res == RLIMIT_MEMLOCK &&
76453+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
76454+ goto out_rcu_unlock;
76455+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
76456+ goto out_rcu_unlock;
76457+ rcu_read_unlock();
76458+
76459+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
76460+
76461+ return;
76462+out_rcu_unlock:
76463+ rcu_read_unlock();
76464+ return;
76465+}
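+/* The gt flag picks the comparison: gt=1 callers only log when the request
+ * strictly exceeds the limit, gt=0 callers log when it merely reaches it.
+ * Holders of CAP_SYS_ADMIN/CAP_SYS_RESOURCE, CAP_IPC_LOCK and CAP_SYS_NICE
+ * are exempted from NPROC, MEMLOCK and NICE logging respectively, mirroring
+ * the kernel's own enforcement exemptions for those capabilities. */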
76466diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
76467new file mode 100644
76468index 0000000..2040e61
76469--- /dev/null
76470+++ b/grsecurity/gracl_segv.c
76471@@ -0,0 +1,313 @@
76472+#include <linux/kernel.h>
76473+#include <linux/mm.h>
76474+#include <asm/uaccess.h>
76475+#include <asm/errno.h>
76476+#include <asm/mman.h>
76477+#include <net/sock.h>
76478+#include <linux/file.h>
76479+#include <linux/fs.h>
76480+#include <linux/net.h>
76481+#include <linux/in.h>
76482+#include <linux/slab.h>
76483+#include <linux/types.h>
76484+#include <linux/sched.h>
76485+#include <linux/timer.h>
76486+#include <linux/gracl.h>
76487+#include <linux/grsecurity.h>
76488+#include <linux/grinternal.h>
76489+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76490+#include <linux/magic.h>
76491+#include <linux/pagemap.h>
76492+#include "../fs/btrfs/async-thread.h"
76493+#include "../fs/btrfs/ctree.h"
76494+#include "../fs/btrfs/btrfs_inode.h"
76495+#endif
76496+
76497+static struct crash_uid *uid_set;
76498+static unsigned short uid_used;
76499+static DEFINE_SPINLOCK(gr_uid_lock);
76500+extern rwlock_t gr_inode_lock;
76501+extern struct acl_subject_label *
76502+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
76503+ struct acl_role_label *role);
76504+
76505+static inline dev_t __get_dev(const struct dentry *dentry)
76506+{
76507+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76508+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
76509+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
76510+ else
76511+#endif
76512+ return dentry->d_sb->s_dev;
76513+}
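+/* btrfs subvolumes all share one superblock s_dev while inode numbers are
+ * only unique per subvolume, so the per-subvolume anon_dev is used instead
+ * to keep inode/device ACL lookups from colliding across subvolumes. */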
76514+
76515+int
76516+gr_init_uidset(void)
76517+{
76518+ uid_set =
76519+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
76520+ uid_used = 0;
76521+
76522+ return uid_set ? 1 : 0;
76523+}
76524+
76525+void
76526+gr_free_uidset(void)
76527+{
76528+ if (uid_set) {
76529+ struct crash_uid *tmpset;
76530+ spin_lock(&gr_uid_lock);
76531+ tmpset = uid_set;
76532+ uid_set = NULL;
76533+ uid_used = 0;
76534+ spin_unlock(&gr_uid_lock);
76535+		kfree(tmpset);	/* tmpset is necessarily non-NULL here */
76537+ }
76538+
76539+ return;
76540+}
76541+
76542+int
76543+gr_find_uid(const uid_t uid)
76544+{
76545+ struct crash_uid *tmp = uid_set;
76546+ uid_t buid;
76547+ int low = 0, high = uid_used - 1, mid;
76548+
76549+ while (high >= low) {
76550+ mid = (low + high) >> 1;
76551+ buid = tmp[mid].uid;
76552+ if (buid == uid)
76553+ return mid;
76554+ if (buid > uid)
76555+ high = mid - 1;
76556+ if (buid < uid)
76557+ low = mid + 1;
76558+ }
76559+
76560+ return -1;
76561+}
76562+
76563+static __inline__ void
76564+gr_insertsort(void)
76565+{
76566+ unsigned short i, j;
76567+ struct crash_uid index;
76568+
76569+ for (i = 1; i < uid_used; i++) {
76570+ index = uid_set[i];
76571+ j = i;
76572+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
76573+ uid_set[j] = uid_set[j - 1];
76574+ j--;
76575+ }
76576+ uid_set[j] = index;
76577+ }
76578+
76579+ return;
76580+}
76581+
76582+static __inline__ void
76583+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
76584+{
76585+ int loc;
76586+ uid_t uid = GR_GLOBAL_UID(kuid);
76587+
76588+ if (uid_used == GR_UIDTABLE_MAX)
76589+ return;
76590+
76591+ loc = gr_find_uid(uid);
76592+
76593+ if (loc >= 0) {
76594+ uid_set[loc].expires = expires;
76595+ return;
76596+ }
76597+
76598+ uid_set[uid_used].uid = uid;
76599+ uid_set[uid_used].expires = expires;
76600+ uid_used++;
76601+
76602+ gr_insertsort();
76603+
76604+ return;
76605+}
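+/* The crash-uid table is a fixed array of GR_UIDTABLE_MAX entries guarded
+ * by gr_uid_lock: gr_find_uid() binary-searches it, gr_insertsort() keeps
+ * it ordered after each insert, and once the table is full new uids are
+ * silently dropped rather than evicting older bans. Typical use, as in
+ * gr_handle_crash() below:
+ *
+ *	spin_lock(&gr_uid_lock);
+ *	gr_insert_uid(cred->uid, curr->expires);
+ *	spin_unlock(&gr_uid_lock);
+ */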
76606+
76607+void
76608+gr_remove_uid(const unsigned short loc)
76609+{
76610+ unsigned short i;
76611+
76612+ for (i = loc + 1; i < uid_used; i++)
76613+ uid_set[i - 1] = uid_set[i];
76614+
76615+ uid_used--;
76616+
76617+ return;
76618+}
76619+
76620+int
76621+gr_check_crash_uid(const kuid_t kuid)
76622+{
76623+ int loc;
76624+ int ret = 0;
76625+ uid_t uid;
76626+
76627+ if (unlikely(!gr_acl_is_enabled()))
76628+ return 0;
76629+
76630+ uid = GR_GLOBAL_UID(kuid);
76631+
76632+ spin_lock(&gr_uid_lock);
76633+ loc = gr_find_uid(uid);
76634+
76635+ if (loc < 0)
76636+ goto out_unlock;
76637+
76638+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
76639+ gr_remove_uid(loc);
76640+ else
76641+ ret = 1;
76642+
76643+out_unlock:
76644+ spin_unlock(&gr_uid_lock);
76645+ return ret;
76646+}
76647+
76648+static __inline__ int
76649+proc_is_setxid(const struct cred *cred)
76650+{
76651+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
76652+ !uid_eq(cred->uid, cred->fsuid))
76653+ return 1;
76654+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
76655+ !gid_eq(cred->gid, cred->fsgid))
76656+ return 1;
76657+
76658+ return 0;
76659+}
76660+
76661+extern int gr_fake_force_sig(int sig, struct task_struct *t);
76662+
76663+void
76664+gr_handle_crash(struct task_struct *task, const int sig)
76665+{
76666+ struct acl_subject_label *curr;
76667+ struct task_struct *tsk, *tsk2;
76668+ const struct cred *cred;
76669+ const struct cred *cred2;
76670+
76671+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
76672+ return;
76673+
76674+ if (unlikely(!gr_acl_is_enabled()))
76675+ return;
76676+
76677+ curr = task->acl;
76678+
76679+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
76680+ return;
76681+
76682+ if (time_before_eq(curr->expires, get_seconds())) {
76683+ curr->expires = 0;
76684+ curr->crashes = 0;
76685+ }
76686+
76687+ curr->crashes++;
76688+
76689+ if (!curr->expires)
76690+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
76691+
76692+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76693+ time_after(curr->expires, get_seconds())) {
76694+ rcu_read_lock();
76695+ cred = __task_cred(task);
76696+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
76697+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76698+ spin_lock(&gr_uid_lock);
76699+ gr_insert_uid(cred->uid, curr->expires);
76700+ spin_unlock(&gr_uid_lock);
76701+ curr->expires = 0;
76702+ curr->crashes = 0;
76703+ read_lock(&tasklist_lock);
76704+ do_each_thread(tsk2, tsk) {
76705+ cred2 = __task_cred(tsk);
76706+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
76707+ gr_fake_force_sig(SIGKILL, tsk);
76708+ } while_each_thread(tsk2, tsk);
76709+ read_unlock(&tasklist_lock);
76710+ } else {
76711+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76712+ read_lock(&tasklist_lock);
76713+ read_lock(&grsec_exec_file_lock);
76714+ do_each_thread(tsk2, tsk) {
76715+ if (likely(tsk != task)) {
76716+ // if this thread has the same subject as the one that triggered
76717+ // RES_CRASH and it's the same binary, kill it
76718+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
76719+ gr_fake_force_sig(SIGKILL, tsk);
76720+ }
76721+ } while_each_thread(tsk2, tsk);
76722+ read_unlock(&grsec_exec_file_lock);
76723+ read_unlock(&tasklist_lock);
76724+ }
76725+ rcu_read_unlock();
76726+ }
76727+
76728+ return;
76729+}
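+/* Once a subject exceeds its RES_CRASH rate (crashes >= rlim_cur within a
+ * rlim_max-second window), containment takes one of two forms: a crashing
+ * setuid/setgid non-root process gets its uid banned via the crash-uid
+ * table and all of that uid's tasks killed, while any other process only
+ * has tasks sharing the same subject and backing binary killed. */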
76730+
76731+int
76732+gr_check_crash_exec(const struct file *filp)
76733+{
76734+ struct acl_subject_label *curr;
76735+
76736+ if (unlikely(!gr_acl_is_enabled()))
76737+ return 0;
76738+
76739+ read_lock(&gr_inode_lock);
76740+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
76741+ __get_dev(filp->f_path.dentry),
76742+ current->role);
76743+ read_unlock(&gr_inode_lock);
76744+
76745+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
76746+ (!curr->crashes && !curr->expires))
76747+ return 0;
76748+
76749+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76750+ time_after(curr->expires, get_seconds()))
76751+ return 1;
76752+ else if (time_before_eq(curr->expires, get_seconds())) {
76753+ curr->crashes = 0;
76754+ curr->expires = 0;
76755+ }
76756+
76757+ return 0;
76758+}
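+/* gr_check_crash_exec() enforces the ban at execve() time: while the
+ * subject's crash count is at the threshold and the window has not
+ * expired, executing that binary is refused; an expired window resets the
+ * counters instead. */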
76759+
76760+void
76761+gr_handle_alertkill(struct task_struct *task)
76762+{
76763+ struct acl_subject_label *curracl;
76764+ __u32 curr_ip;
76765+ struct task_struct *p, *p2;
76766+
76767+ if (unlikely(!gr_acl_is_enabled()))
76768+ return;
76769+
76770+ curracl = task->acl;
76771+ curr_ip = task->signal->curr_ip;
76772+
76773+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
76774+ read_lock(&tasklist_lock);
76775+ do_each_thread(p2, p) {
76776+ if (p->signal->curr_ip == curr_ip)
76777+ gr_fake_force_sig(SIGKILL, p);
76778+ } while_each_thread(p2, p);
76779+ read_unlock(&tasklist_lock);
76780+ } else if (curracl->mode & GR_KILLPROC)
76781+ gr_fake_force_sig(SIGKILL, task);
76782+
76783+ return;
76784+}
76785diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
76786new file mode 100644
76787index 0000000..98011b0
76788--- /dev/null
76789+++ b/grsecurity/gracl_shm.c
76790@@ -0,0 +1,40 @@
76791+#include <linux/kernel.h>
76792+#include <linux/mm.h>
76793+#include <linux/sched.h>
76794+#include <linux/file.h>
76795+#include <linux/ipc.h>
76796+#include <linux/gracl.h>
76797+#include <linux/grsecurity.h>
76798+#include <linux/grinternal.h>
76799+
76800+int
76801+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76802+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
76803+{
76804+ struct task_struct *task;
76805+
76806+ if (!gr_acl_is_enabled())
76807+ return 1;
76808+
76809+ rcu_read_lock();
76810+ read_lock(&tasklist_lock);
76811+
76812+ task = find_task_by_vpid(shm_cprid);
76813+
76814+ if (unlikely(!task))
76815+ task = find_task_by_vpid(shm_lapid);
76816+
76817+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
76818+ (task_pid_nr(task) == shm_lapid)) &&
76819+ (task->acl->mode & GR_PROTSHM) &&
76820+ (task->acl != current->acl))) {
76821+ read_unlock(&tasklist_lock);
76822+ rcu_read_unlock();
76823+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
76824+ return 0;
76825+ }
76826+ read_unlock(&tasklist_lock);
76827+ rcu_read_unlock();
76828+
76829+ return 1;
76830+}
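+/* Hook convention here: return 1 to allow the shmat(), 0 to deny. The
+ * attach is denied when a live task that created the segment (or, if that
+ * pid was recycled, the last attacher) runs under a GR_PROTSHM subject
+ * different from the caller's. */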
76831diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
76832new file mode 100644
76833index 0000000..bc0be01
76834--- /dev/null
76835+++ b/grsecurity/grsec_chdir.c
76836@@ -0,0 +1,19 @@
76837+#include <linux/kernel.h>
76838+#include <linux/sched.h>
76839+#include <linux/fs.h>
76840+#include <linux/file.h>
76841+#include <linux/grsecurity.h>
76842+#include <linux/grinternal.h>
76843+
76844+void
76845+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
76846+{
76847+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76848+ if ((grsec_enable_chdir && grsec_enable_group &&
76849+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
76850+ !grsec_enable_group)) {
76851+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
76852+ }
76853+#endif
76854+ return;
76855+}
76856diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
76857new file mode 100644
76858index 0000000..baa635c
76859--- /dev/null
76860+++ b/grsecurity/grsec_chroot.c
76861@@ -0,0 +1,387 @@
76862+#include <linux/kernel.h>
76863+#include <linux/module.h>
76864+#include <linux/sched.h>
76865+#include <linux/file.h>
76866+#include <linux/fs.h>
76867+#include <linux/mount.h>
76868+#include <linux/types.h>
76869+#include "../fs/mount.h"
76870+#include <linux/grsecurity.h>
76871+#include <linux/grinternal.h>
76872+
76873+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76874+int gr_init_ran;
76875+#endif
76876+
76877+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76878+{
76879+#ifdef CONFIG_GRKERNSEC
76880+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76881+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76882+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76883+ && gr_init_ran
76884+#endif
76885+ )
76886+ task->gr_is_chrooted = 1;
76887+ else {
76888+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76889+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76890+ gr_init_ran = 1;
76891+#endif
76892+ task->gr_is_chrooted = 0;
76893+ }
76894+
76895+ task->gr_chroot_dentry = path->dentry;
76896+#endif
76897+ return;
76898+}
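+/* With CONFIG_GRKERNSEC_CHROOT_INITRD, chroots performed before init has
+ * run are not flagged, since switching from an initrd to the real root is
+ * itself effectively a chroot; the first pass of pid 1 through here flips
+ * gr_init_ran and arms the check for everyone afterwards. */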
76899+
76900+void gr_clear_chroot_entries(struct task_struct *task)
76901+{
76902+#ifdef CONFIG_GRKERNSEC
76903+ task->gr_is_chrooted = 0;
76904+ task->gr_chroot_dentry = NULL;
76905+#endif
76906+ return;
76907+}
76908+
76909+int
76910+gr_handle_chroot_unix(const pid_t pid)
76911+{
76912+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76913+ struct task_struct *p;
76914+
76915+ if (unlikely(!grsec_enable_chroot_unix))
76916+ return 1;
76917+
76918+ if (likely(!proc_is_chrooted(current)))
76919+ return 1;
76920+
76921+ rcu_read_lock();
76922+ read_lock(&tasklist_lock);
76923+ p = find_task_by_vpid_unrestricted(pid);
76924+ if (unlikely(p && !have_same_root(current, p))) {
76925+ read_unlock(&tasklist_lock);
76926+ rcu_read_unlock();
76927+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76928+ return 0;
76929+ }
76930+ read_unlock(&tasklist_lock);
76931+ rcu_read_unlock();
76932+#endif
76933+ return 1;
76934+}
76935+
76936+int
76937+gr_handle_chroot_nice(void)
76938+{
76939+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76940+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76941+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76942+ return -EPERM;
76943+ }
76944+#endif
76945+ return 0;
76946+}
76947+
76948+int
76949+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76950+{
76951+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76952+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76953+ && proc_is_chrooted(current)) {
76954+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76955+ return -EACCES;
76956+ }
76957+#endif
76958+ return 0;
76959+}
76960+
76961+int
76962+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76963+{
76964+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76965+ struct task_struct *p;
76966+ int ret = 0;
76967+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76968+ return ret;
76969+
76970+ read_lock(&tasklist_lock);
76971+ do_each_pid_task(pid, type, p) {
76972+ if (!have_same_root(current, p)) {
76973+ ret = 1;
76974+ goto out;
76975+ }
76976+ } while_each_pid_task(pid, type, p);
76977+out:
76978+ read_unlock(&tasklist_lock);
76979+ return ret;
76980+#endif
76981+ return 0;
76982+}
76983+
76984+int
76985+gr_pid_is_chrooted(struct task_struct *p)
76986+{
76987+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76988+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76989+ return 0;
76990+
76991+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76992+ !have_same_root(current, p)) {
76993+ return 1;
76994+ }
76995+#endif
76996+ return 0;
76997+}
76998+
76999+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
77000+
77001+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
77002+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
77003+{
77004+ struct path path, currentroot;
77005+ int ret = 0;
77006+
77007+ path.dentry = (struct dentry *)u_dentry;
77008+ path.mnt = (struct vfsmount *)u_mnt;
77009+ get_fs_root(current->fs, &currentroot);
77010+ if (path_is_under(&path, &currentroot))
77011+ ret = 1;
77012+ path_put(&currentroot);
77013+
77014+ return ret;
77015+}
77016+#endif
77017+
77018+int
77019+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
77020+{
77021+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77022+ if (!grsec_enable_chroot_fchdir)
77023+ return 1;
77024+
77025+ if (!proc_is_chrooted(current))
77026+ return 1;
77027+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
77028+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
77029+ return 0;
77030+ }
77031+#endif
77032+ return 1;
77033+}
77034+
77035+int
77036+gr_chroot_fhandle(void)
77037+{
77038+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77039+ if (!grsec_enable_chroot_fchdir)
77040+ return 1;
77041+
77042+ if (!proc_is_chrooted(current))
77043+ return 1;
77044+ else {
77045+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
77046+ return 0;
77047+ }
77048+#endif
77049+ return 1;
77050+}
77051+
77052+int
77053+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77054+ const time_t shm_createtime)
77055+{
77056+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77057+ struct task_struct *p;
77058+ time_t starttime;
77059+
77060+ if (unlikely(!grsec_enable_chroot_shmat))
77061+ return 1;
77062+
77063+ if (likely(!proc_is_chrooted(current)))
77064+ return 1;
77065+
77066+ rcu_read_lock();
77067+ read_lock(&tasklist_lock);
77068+
77069+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
77070+ starttime = p->start_time.tv_sec;
77071+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
77072+ if (have_same_root(current, p)) {
77073+ goto allow;
77074+ } else {
77075+ read_unlock(&tasklist_lock);
77076+ rcu_read_unlock();
77077+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77078+ return 0;
77079+ }
77080+ }
77081+		/* creator exited or the pid was reused; fall through to the next check */
77082+ }
77083+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
77084+ if (unlikely(!have_same_root(current, p))) {
77085+ read_unlock(&tasklist_lock);
77086+ rcu_read_unlock();
77087+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77088+ return 0;
77089+ }
77090+ }
77091+
77092+allow:
77093+ read_unlock(&tasklist_lock);
77094+ rcu_read_unlock();
77095+#endif
77096+ return 1;
77097+}
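+/* Two lookups are needed because shm_cprid may have been recycled since
+ * the segment was created (hence the start-time comparison); when the
+ * creator is gone, the last-attach pid is checked as a fallback. */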
77098+
77099+void
77100+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
77101+{
77102+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77103+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
77104+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
77105+#endif
77106+ return;
77107+}
77108+
77109+int
77110+gr_handle_chroot_mknod(const struct dentry *dentry,
77111+ const struct vfsmount *mnt, const int mode)
77112+{
77113+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77114+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
77115+ proc_is_chrooted(current)) {
77116+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
77117+ return -EPERM;
77118+ }
77119+#endif
77120+ return 0;
77121+}
77122+
77123+int
77124+gr_handle_chroot_mount(const struct dentry *dentry,
77125+ const struct vfsmount *mnt, const char *dev_name)
77126+{
77127+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77128+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
77129+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
77130+ return -EPERM;
77131+ }
77132+#endif
77133+ return 0;
77134+}
77135+
77136+int
77137+gr_handle_chroot_pivot(void)
77138+{
77139+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77140+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
77141+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
77142+ return -EPERM;
77143+ }
77144+#endif
77145+ return 0;
77146+}
77147+
77148+int
77149+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
77150+{
77151+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77152+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
77153+ !gr_is_outside_chroot(dentry, mnt)) {
77154+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
77155+ return -EPERM;
77156+ }
77157+#endif
77158+ return 0;
77159+}
77160+
77161+extern const char *captab_log[];
77162+extern int captab_log_entries;
77163+
77164+int
77165+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77166+{
77167+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77168+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77169+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77170+ if (cap_raised(chroot_caps, cap)) {
77171+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
77172+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
77173+ }
77174+ return 0;
77175+ }
77176+ }
77177+#endif
77178+ return 1;
77179+}
77180+
77181+int
77182+gr_chroot_is_capable(const int cap)
77183+{
77184+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77185+ return gr_task_chroot_is_capable(current, current_cred(), cap);
77186+#endif
77187+ return 1;
77188+}
77189+
77190+int
77191+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
77192+{
77193+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77194+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77195+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77196+ if (cap_raised(chroot_caps, cap)) {
77197+ return 0;
77198+ }
77199+ }
77200+#endif
77201+ return 1;
77202+}
77203+
77204+int
77205+gr_chroot_is_capable_nolog(const int cap)
77206+{
77207+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77208+ return gr_task_chroot_is_capable_nolog(current, cap);
77209+#endif
77210+ return 1;
77211+}
77212+
77213+int
77214+gr_handle_chroot_sysctl(const int op)
77215+{
77216+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77217+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
77218+ proc_is_chrooted(current))
77219+ return -EACCES;
77220+#endif
77221+ return 0;
77222+}
77223+
77224+void
77225+gr_handle_chroot_chdir(const struct path *path)
77226+{
77227+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77228+ if (grsec_enable_chroot_chdir)
77229+ set_fs_pwd(current->fs, path);
77230+#endif
77231+ return;
77232+}
77233+
77234+int
77235+gr_handle_chroot_chmod(const struct dentry *dentry,
77236+ const struct vfsmount *mnt, const int mode)
77237+{
77238+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77239+ /* allow chmod +s on directories, but not files */
77240+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
77241+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
77242+ proc_is_chrooted(current)) {
77243+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
77244+ return -EPERM;
77245+ }
77246+#endif
77247+ return 0;
77248+}
77249diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
77250new file mode 100644
77251index 0000000..2d3bcb7
77252--- /dev/null
77253+++ b/grsecurity/grsec_disabled.c
77254@@ -0,0 +1,440 @@
77255+#include <linux/kernel.h>
77256+#include <linux/module.h>
77257+#include <linux/sched.h>
77258+#include <linux/file.h>
77259+#include <linux/fs.h>
77260+#include <linux/kdev_t.h>
77261+#include <linux/net.h>
77262+#include <linux/in.h>
77263+#include <linux/ip.h>
77264+#include <linux/skbuff.h>
77265+#include <linux/sysctl.h>
77266+
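+/* The stubs below are the CONFIG_GRKERNSEC=n variants of every grsecurity
+ * hook. Each returns whichever value means "allow" or "no restriction"
+ * under its particular hook's convention, which is why some stubs return
+ * 0 and others 1. */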
77267+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
77268+void
77269+pax_set_initial_flags(struct linux_binprm *bprm)
77270+{
77271+ return;
77272+}
77273+#endif
77274+
77275+#ifdef CONFIG_SYSCTL
77276+__u32
77277+gr_handle_sysctl(const struct ctl_table * table, const int op)
77278+{
77279+ return 0;
77280+}
77281+#endif
77282+
77283+#ifdef CONFIG_TASKSTATS
77284+int gr_is_taskstats_denied(int pid)
77285+{
77286+ return 0;
77287+}
77288+#endif
77289+
77290+int
77291+gr_acl_is_enabled(void)
77292+{
77293+ return 0;
77294+}
77295+
77296+int
77297+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
77298+{
77299+ return 0;
77300+}
77301+
77302+void
77303+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77304+{
77305+ return;
77306+}
77307+
77308+int
77309+gr_handle_rawio(const struct inode *inode)
77310+{
77311+ return 0;
77312+}
77313+
77314+void
77315+gr_acl_handle_psacct(struct task_struct *task, const long code)
77316+{
77317+ return;
77318+}
77319+
77320+int
77321+gr_handle_ptrace(struct task_struct *task, const long request)
77322+{
77323+ return 0;
77324+}
77325+
77326+int
77327+gr_handle_proc_ptrace(struct task_struct *task)
77328+{
77329+ return 0;
77330+}
77331+
77332+int
77333+gr_set_acls(const int type)
77334+{
77335+ return 0;
77336+}
77337+
77338+int
77339+gr_check_hidden_task(const struct task_struct *tsk)
77340+{
77341+ return 0;
77342+}
77343+
77344+int
77345+gr_check_protected_task(const struct task_struct *task)
77346+{
77347+ return 0;
77348+}
77349+
77350+int
77351+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77352+{
77353+ return 0;
77354+}
77355+
77356+void
77357+gr_copy_label(struct task_struct *tsk)
77358+{
77359+ return;
77360+}
77361+
77362+void
77363+gr_set_pax_flags(struct task_struct *task)
77364+{
77365+ return;
77366+}
77367+
77368+int
77369+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77370+ const int unsafe_share)
77371+{
77372+ return 0;
77373+}
77374+
77375+void
77376+gr_handle_delete(const ino_t ino, const dev_t dev)
77377+{
77378+ return;
77379+}
77380+
77381+void
77382+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77383+{
77384+ return;
77385+}
77386+
77387+void
77388+gr_handle_crash(struct task_struct *task, const int sig)
77389+{
77390+ return;
77391+}
77392+
77393+int
77394+gr_check_crash_exec(const struct file *filp)
77395+{
77396+ return 0;
77397+}
77398+
77399+int
77400+gr_check_crash_uid(const kuid_t uid)
77401+{
77402+ return 0;
77403+}
77404+
77405+void
77406+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77407+ struct dentry *old_dentry,
77408+ struct dentry *new_dentry,
77409+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
77410+{
77411+ return;
77412+}
77413+
77414+int
77415+gr_search_socket(const int family, const int type, const int protocol)
77416+{
77417+ return 1;
77418+}
77419+
77420+int
77421+gr_search_connectbind(const int mode, const struct socket *sock,
77422+ const struct sockaddr_in *addr)
77423+{
77424+ return 0;
77425+}
77426+
77427+void
77428+gr_handle_alertkill(struct task_struct *task)
77429+{
77430+ return;
77431+}
77432+
77433+__u32
77434+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
77435+{
77436+ return 1;
77437+}
77438+
77439+__u32
77440+gr_acl_handle_hidden_file(const struct dentry * dentry,
77441+ const struct vfsmount * mnt)
77442+{
77443+ return 1;
77444+}
77445+
77446+__u32
77447+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
77448+ int acc_mode)
77449+{
77450+ return 1;
77451+}
77452+
77453+__u32
77454+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
77455+{
77456+ return 1;
77457+}
77458+
77459+__u32
77460+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
77461+{
77462+ return 1;
77463+}
77464+
77465+int
77466+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
77467+ unsigned int *vm_flags)
77468+{
77469+ return 1;
77470+}
77471+
77472+__u32
77473+gr_acl_handle_truncate(const struct dentry * dentry,
77474+ const struct vfsmount * mnt)
77475+{
77476+ return 1;
77477+}
77478+
77479+__u32
77480+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
77481+{
77482+ return 1;
77483+}
77484+
77485+__u32
77486+gr_acl_handle_access(const struct dentry * dentry,
77487+ const struct vfsmount * mnt, const int fmode)
77488+{
77489+ return 1;
77490+}
77491+
77492+__u32
77493+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
77494+ umode_t *mode)
77495+{
77496+ return 1;
77497+}
77498+
77499+__u32
77500+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
77501+{
77502+ return 1;
77503+}
77504+
77505+__u32
77506+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
77507+{
77508+ return 1;
77509+}
77510+
77511+__u32
77512+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
77513+{
77514+ return 1;
77515+}
77516+
77517+void
77518+grsecurity_init(void)
77519+{
77520+ return;
77521+}
77522+
77523+umode_t gr_acl_umask(void)
77524+{
77525+ return 0;
77526+}
77527+
77528+__u32
77529+gr_acl_handle_mknod(const struct dentry * new_dentry,
77530+ const struct dentry * parent_dentry,
77531+ const struct vfsmount * parent_mnt,
77532+ const int mode)
77533+{
77534+ return 1;
77535+}
77536+
77537+__u32
77538+gr_acl_handle_mkdir(const struct dentry * new_dentry,
77539+ const struct dentry * parent_dentry,
77540+ const struct vfsmount * parent_mnt)
77541+{
77542+ return 1;
77543+}
77544+
77545+__u32
77546+gr_acl_handle_symlink(const struct dentry * new_dentry,
77547+ const struct dentry * parent_dentry,
77548+ const struct vfsmount * parent_mnt, const struct filename *from)
77549+{
77550+ return 1;
77551+}
77552+
77553+__u32
77554+gr_acl_handle_link(const struct dentry * new_dentry,
77555+ const struct dentry * parent_dentry,
77556+ const struct vfsmount * parent_mnt,
77557+ const struct dentry * old_dentry,
77558+ const struct vfsmount * old_mnt, const struct filename *to)
77559+{
77560+ return 1;
77561+}
77562+
77563+int
77564+gr_acl_handle_rename(const struct dentry *new_dentry,
77565+ const struct dentry *parent_dentry,
77566+ const struct vfsmount *parent_mnt,
77567+ const struct dentry *old_dentry,
77568+ const struct inode *old_parent_inode,
77569+ const struct vfsmount *old_mnt, const struct filename *newname,
77570+ unsigned int flags)
77571+{
77572+ return 0;
77573+}
77574+
77575+int
77576+gr_acl_handle_filldir(const struct file *file, const char *name,
77577+ const int namelen, const ino_t ino)
77578+{
77579+ return 1;
77580+}
77581+
77582+int
77583+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77584+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
77585+{
77586+ return 1;
77587+}
77588+
77589+int
77590+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
77591+{
77592+ return 0;
77593+}
77594+
77595+int
77596+gr_search_accept(const struct socket *sock)
77597+{
77598+ return 0;
77599+}
77600+
77601+int
77602+gr_search_listen(const struct socket *sock)
77603+{
77604+ return 0;
77605+}
77606+
77607+int
77608+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
77609+{
77610+ return 0;
77611+}
77612+
77613+__u32
77614+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
77615+{
77616+ return 1;
77617+}
77618+
77619+__u32
77620+gr_acl_handle_creat(const struct dentry * dentry,
77621+ const struct dentry * p_dentry,
77622+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
77623+ const int imode)
77624+{
77625+ return 1;
77626+}
77627+
77628+void
77629+gr_acl_handle_exit(void)
77630+{
77631+ return;
77632+}
77633+
77634+int
77635+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
77636+{
77637+ return 1;
77638+}
77639+
77640+void
77641+gr_set_role_label(const kuid_t uid, const kgid_t gid)
77642+{
77643+ return;
77644+}
77645+
77646+int
77647+gr_acl_handle_procpidmem(const struct task_struct *task)
77648+{
77649+ return 0;
77650+}
77651+
77652+int
77653+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
77654+{
77655+ return 0;
77656+}
77657+
77658+int
77659+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
77660+{
77661+ return 0;
77662+}
77663+
77664+int
77665+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
77666+{
77667+ return 0;
77668+}
77669+
77670+int
77671+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
77672+{
77673+ return 0;
77674+}
77675+
77676+int gr_acl_enable_at_secure(void)
77677+{
77678+ return 0;
77679+}
77680+
77681+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77682+{
77683+ return dentry->d_sb->s_dev;
77684+}
77685+
77686+void gr_put_exec_file(struct task_struct *task)
77687+{
77688+ return;
77689+}
77690+
77691+#ifdef CONFIG_SECURITY
77692+EXPORT_SYMBOL_GPL(gr_check_user_change);
77693+EXPORT_SYMBOL_GPL(gr_check_group_change);
77694+#endif
77695diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
77696new file mode 100644
77697index 0000000..14638ff
77698--- /dev/null
77699+++ b/grsecurity/grsec_exec.c
77700@@ -0,0 +1,188 @@
77701+#include <linux/kernel.h>
77702+#include <linux/sched.h>
77703+#include <linux/file.h>
77704+#include <linux/binfmts.h>
77705+#include <linux/fs.h>
77706+#include <linux/types.h>
77707+#include <linux/grdefs.h>
77708+#include <linux/grsecurity.h>
77709+#include <linux/grinternal.h>
77710+#include <linux/capability.h>
77711+#include <linux/module.h>
77712+#include <linux/compat.h>
77713+
77714+#include <asm/uaccess.h>
77715+
77716+#ifdef CONFIG_GRKERNSEC_EXECLOG
77717+static char gr_exec_arg_buf[132];
77718+static DEFINE_MUTEX(gr_exec_arg_mutex);
77719+#endif
77720+
77721+struct user_arg_ptr {
77722+#ifdef CONFIG_COMPAT
77723+ bool is_compat;
77724+#endif
77725+ union {
77726+ const char __user *const __user *native;
77727+#ifdef CONFIG_COMPAT
77728+ const compat_uptr_t __user *compat;
77729+#endif
77730+ } ptr;
77731+};
77732+
77733+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
77734+
77735+void
77736+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
77737+{
77738+#ifdef CONFIG_GRKERNSEC_EXECLOG
77739+ char *grarg = gr_exec_arg_buf;
77740+ unsigned int i, x, execlen = 0;
77741+ char c;
77742+
77743+ if (!((grsec_enable_execlog && grsec_enable_group &&
77744+ in_group_p(grsec_audit_gid))
77745+ || (grsec_enable_execlog && !grsec_enable_group)))
77746+ return;
77747+
77748+ mutex_lock(&gr_exec_arg_mutex);
77749+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
77750+
77751+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
77752+ const char __user *p;
77753+ unsigned int len;
77754+
77755+ p = get_user_arg_ptr(argv, i);
77756+ if (IS_ERR(p))
77757+ goto log;
77758+
77759+ len = strnlen_user(p, 128 - execlen);
77760+ if (len > 128 - execlen)
77761+ len = 128 - execlen;
77762+ else if (len > 0)
77763+ len--;
77764+ if (copy_from_user(grarg + execlen, p, len))
77765+ goto log;
77766+
77767+ /* rewrite unprintable characters */
77768+ for (x = 0; x < len; x++) {
77769+ c = *(grarg + execlen + x);
77770+ if (c < 32 || c > 126)
77771+ *(grarg + execlen + x) = ' ';
77772+ }
77773+
77774+ execlen += len;
77775+ *(grarg + execlen) = ' ';
77776+ *(grarg + execlen + 1) = '\0';
77777+ execlen++;
77778+ }
77779+
77780+ log:
77781+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
77782+ bprm->file->f_path.mnt, grarg);
77783+ mutex_unlock(&gr_exec_arg_mutex);
77784+#endif
77785+ return;
77786+}
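+/* The reconstructed command line is capped at 128 bytes; gr_exec_arg_buf
+ * is 132 bytes, apparently to leave headroom for the trailing separator
+ * and NUL written after the last copied argument. Unprintable bytes are
+ * replaced with spaces before the line is logged. */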
77787+
77788+#ifdef CONFIG_GRKERNSEC
77789+extern int gr_acl_is_capable(const int cap);
77790+extern int gr_acl_is_capable_nolog(const int cap);
77791+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77792+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
77793+extern int gr_chroot_is_capable(const int cap);
77794+extern int gr_chroot_is_capable_nolog(const int cap);
77795+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77796+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
77797+#endif
77798+
77799+const char *captab_log[] = {
77800+ "CAP_CHOWN",
77801+ "CAP_DAC_OVERRIDE",
77802+ "CAP_DAC_READ_SEARCH",
77803+ "CAP_FOWNER",
77804+ "CAP_FSETID",
77805+ "CAP_KILL",
77806+ "CAP_SETGID",
77807+ "CAP_SETUID",
77808+ "CAP_SETPCAP",
77809+ "CAP_LINUX_IMMUTABLE",
77810+ "CAP_NET_BIND_SERVICE",
77811+ "CAP_NET_BROADCAST",
77812+ "CAP_NET_ADMIN",
77813+ "CAP_NET_RAW",
77814+ "CAP_IPC_LOCK",
77815+ "CAP_IPC_OWNER",
77816+ "CAP_SYS_MODULE",
77817+ "CAP_SYS_RAWIO",
77818+ "CAP_SYS_CHROOT",
77819+ "CAP_SYS_PTRACE",
77820+ "CAP_SYS_PACCT",
77821+ "CAP_SYS_ADMIN",
77822+ "CAP_SYS_BOOT",
77823+ "CAP_SYS_NICE",
77824+ "CAP_SYS_RESOURCE",
77825+ "CAP_SYS_TIME",
77826+ "CAP_SYS_TTY_CONFIG",
77827+ "CAP_MKNOD",
77828+ "CAP_LEASE",
77829+ "CAP_AUDIT_WRITE",
77830+ "CAP_AUDIT_CONTROL",
77831+ "CAP_SETFCAP",
77832+ "CAP_MAC_OVERRIDE",
77833+ "CAP_MAC_ADMIN",
77834+ "CAP_SYSLOG",
77835+ "CAP_WAKE_ALARM",
77836+ "CAP_BLOCK_SUSPEND"
77837+};
77838+
77839+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
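+/* captab_log[] must stay in step with the capability numbering in
+ * <linux/capability.h>; the cap < captab_log_entries checks in the logging
+ * paths mean a capability newer than this table is still enforced, just
+ * never logged by name. */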
77840+
77841+int gr_is_capable(const int cap)
77842+{
77843+#ifdef CONFIG_GRKERNSEC
77844+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
77845+ return 1;
77846+ return 0;
77847+#else
77848+ return 1;
77849+#endif
77850+}
77851+
77852+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77853+{
77854+#ifdef CONFIG_GRKERNSEC
77855+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77856+ return 1;
77857+ return 0;
77858+#else
77859+ return 1;
77860+#endif
77861+}
77862+
77863+int gr_is_capable_nolog(const int cap)
77864+{
77865+#ifdef CONFIG_GRKERNSEC
77866+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77867+ return 1;
77868+ return 0;
77869+#else
77870+ return 1;
77871+#endif
77872+}
77873+
77874+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77875+{
77876+#ifdef CONFIG_GRKERNSEC
77877+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77878+ return 1;
77879+ return 0;
77880+#else
77881+ return 1;
77882+#endif
77883+}
77884+
77885+EXPORT_SYMBOL_GPL(gr_is_capable);
77886+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77887+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77888+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
77889diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77890new file mode 100644
77891index 0000000..06cc6ea
77892--- /dev/null
77893+++ b/grsecurity/grsec_fifo.c
77894@@ -0,0 +1,24 @@
77895+#include <linux/kernel.h>
77896+#include <linux/sched.h>
77897+#include <linux/fs.h>
77898+#include <linux/file.h>
77899+#include <linux/grinternal.h>
77900+
77901+int
77902+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77903+ const struct dentry *dir, const int flag, const int acc_mode)
77904+{
77905+#ifdef CONFIG_GRKERNSEC_FIFO
77906+ const struct cred *cred = current_cred();
77907+
77908+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77909+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77910+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77911+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77912+ if (!inode_permission(dentry->d_inode, acc_mode))
77913+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77914+ return -EACCES;
77915+ }
77916+#endif
77917+ return 0;
77918+}
77919diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77920new file mode 100644
77921index 0000000..8ca18bf
77922--- /dev/null
77923+++ b/grsecurity/grsec_fork.c
77924@@ -0,0 +1,23 @@
77925+#include <linux/kernel.h>
77926+#include <linux/sched.h>
77927+#include <linux/grsecurity.h>
77928+#include <linux/grinternal.h>
77929+#include <linux/errno.h>
77930+
77931+void
77932+gr_log_forkfail(const int retval)
77933+{
77934+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77935+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77936+ switch (retval) {
77937+ case -EAGAIN:
77938+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77939+ break;
77940+ case -ENOMEM:
77941+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77942+ break;
77943+ }
77944+ }
77945+#endif
77946+ return;
77947+}
77948diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77949new file mode 100644
77950index 0000000..b7cb191
77951--- /dev/null
77952+++ b/grsecurity/grsec_init.c
77953@@ -0,0 +1,286 @@
77954+#include <linux/kernel.h>
77955+#include <linux/sched.h>
77956+#include <linux/mm.h>
77957+#include <linux/gracl.h>
77958+#include <linux/slab.h>
77959+#include <linux/vmalloc.h>
77960+#include <linux/percpu.h>
77961+#include <linux/module.h>
77962+
77963+int grsec_enable_ptrace_readexec;
77964+int grsec_enable_setxid;
77965+int grsec_enable_symlinkown;
77966+kgid_t grsec_symlinkown_gid;
77967+int grsec_enable_brute;
77968+int grsec_enable_link;
77969+int grsec_enable_dmesg;
77970+int grsec_enable_harden_ptrace;
77971+int grsec_enable_harden_ipc;
77972+int grsec_enable_fifo;
77973+int grsec_enable_execlog;
77974+int grsec_enable_signal;
77975+int grsec_enable_forkfail;
77976+int grsec_enable_audit_ptrace;
77977+int grsec_enable_time;
77978+int grsec_enable_group;
77979+kgid_t grsec_audit_gid;
77980+int grsec_enable_chdir;
77981+int grsec_enable_mount;
77982+int grsec_enable_rofs;
77983+int grsec_deny_new_usb;
77984+int grsec_enable_chroot_findtask;
77985+int grsec_enable_chroot_mount;
77986+int grsec_enable_chroot_shmat;
77987+int grsec_enable_chroot_fchdir;
77988+int grsec_enable_chroot_double;
77989+int grsec_enable_chroot_pivot;
77990+int grsec_enable_chroot_chdir;
77991+int grsec_enable_chroot_chmod;
77992+int grsec_enable_chroot_mknod;
77993+int grsec_enable_chroot_nice;
77994+int grsec_enable_chroot_execlog;
77995+int grsec_enable_chroot_caps;
77996+int grsec_enable_chroot_sysctl;
77997+int grsec_enable_chroot_unix;
77998+int grsec_enable_tpe;
77999+kgid_t grsec_tpe_gid;
78000+int grsec_enable_blackhole;
78001+#ifdef CONFIG_IPV6_MODULE
78002+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
78003+#endif
78004+int grsec_lastack_retries;
78005+int grsec_enable_tpe_all;
78006+int grsec_enable_tpe_invert;
78007+int grsec_enable_socket_all;
78008+kgid_t grsec_socket_all_gid;
78009+int grsec_enable_socket_client;
78010+kgid_t grsec_socket_client_gid;
78011+int grsec_enable_socket_server;
78012+kgid_t grsec_socket_server_gid;
78013+int grsec_resource_logging;
78014+int grsec_disable_privio;
78015+int grsec_enable_log_rwxmaps;
78016+int grsec_lock;
78017+
78018+DEFINE_SPINLOCK(grsec_alert_lock);
78019+unsigned long grsec_alert_wtime = 0;
78020+unsigned long grsec_alert_fyet = 0;
78021+
78022+DEFINE_SPINLOCK(grsec_audit_lock);
78023+
78024+DEFINE_RWLOCK(grsec_exec_file_lock);
78025+
78026+char *gr_shared_page[4];
78027+
78028+char *gr_alert_log_fmt;
78029+char *gr_audit_log_fmt;
78030+char *gr_alert_log_buf;
78031+char *gr_audit_log_buf;
78032+
78033+extern struct gr_arg *gr_usermode;
78034+extern unsigned char *gr_system_salt;
78035+extern unsigned char *gr_system_sum;
78036+
78037+void __init
78038+grsecurity_init(void)
78039+{
78040+ int j;
78041+ /* create the per-cpu shared pages */
78042+
78043+#ifdef CONFIG_X86
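+	/* wipe the BIOS keyboard buffer: 0x41a/0x41c hold the buffer head and
+	 * tail pointers and 0x41e-0x43d is the 32-byte buffer itself, so
+	 * clearing 36 bytes from 0x41a discards any keystrokes (e.g. boot-time
+	 * passphrases) still sitting in low memory
+	 */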
78044+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
78045+#endif
78046+
78047+ for (j = 0; j < 4; j++) {
78048+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
78049+ if (gr_shared_page[j] == NULL) {
78050+ panic("Unable to allocate grsecurity shared page");
78051+ return;
78052+ }
78053+ }
78054+
78055+ /* allocate log buffers */
78056+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
78057+ if (!gr_alert_log_fmt) {
78058+ panic("Unable to allocate grsecurity alert log format buffer");
78059+ return;
78060+ }
78061+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
78062+ if (!gr_audit_log_fmt) {
78063+ panic("Unable to allocate grsecurity audit log format buffer");
78064+ return;
78065+ }
78066+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78067+ if (!gr_alert_log_buf) {
78068+ panic("Unable to allocate grsecurity alert log buffer");
78069+ return;
78070+ }
78071+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78072+ if (!gr_audit_log_buf) {
78073+ panic("Unable to allocate grsecurity audit log buffer");
78074+ return;
78075+ }
78076+
78077+ /* allocate memory for authentication structure */
78078+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
78079+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
78080+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
78081+
78082+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
78083+ panic("Unable to allocate grsecurity authentication structure");
78084+ return;
78085+ }
78086+
78087+#ifdef CONFIG_GRKERNSEC_IO
78088+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
78089+ grsec_disable_privio = 1;
78090+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78091+ grsec_disable_privio = 1;
78092+#else
78093+ grsec_disable_privio = 0;
78094+#endif
78095+#endif
78096+
78097+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78098+ /* for backward compatibility, tpe_invert always defaults to on if
78099+ enabled in the kernel
78100+ */
78101+ grsec_enable_tpe_invert = 1;
78102+#endif
78103+
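+	/* the toggles below receive compile-time defaults only when sysctl
+	 * support is absent or CONFIG_GRKERNSEC_SYSCTL_ON forces them on;
+	 * with sysctl support built in but not forced on, every feature
+	 * starts disabled until the corresponding sysctl is set
+	 */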
78104+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78105+#ifndef CONFIG_GRKERNSEC_SYSCTL
78106+ grsec_lock = 1;
78107+#endif
78108+
78109+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78110+ grsec_enable_log_rwxmaps = 1;
78111+#endif
78112+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78113+ grsec_enable_group = 1;
78114+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
78115+#endif
78116+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78117+ grsec_enable_ptrace_readexec = 1;
78118+#endif
78119+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78120+ grsec_enable_chdir = 1;
78121+#endif
78122+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78123+ grsec_enable_harden_ptrace = 1;
78124+#endif
78125+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78126+ grsec_enable_harden_ipc = 1;
78127+#endif
78128+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78129+ grsec_enable_mount = 1;
78130+#endif
78131+#ifdef CONFIG_GRKERNSEC_LINK
78132+ grsec_enable_link = 1;
78133+#endif
78134+#ifdef CONFIG_GRKERNSEC_BRUTE
78135+ grsec_enable_brute = 1;
78136+#endif
78137+#ifdef CONFIG_GRKERNSEC_DMESG
78138+ grsec_enable_dmesg = 1;
78139+#endif
78140+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78141+ grsec_enable_blackhole = 1;
78142+ grsec_lastack_retries = 4;
78143+#endif
78144+#ifdef CONFIG_GRKERNSEC_FIFO
78145+ grsec_enable_fifo = 1;
78146+#endif
78147+#ifdef CONFIG_GRKERNSEC_EXECLOG
78148+ grsec_enable_execlog = 1;
78149+#endif
78150+#ifdef CONFIG_GRKERNSEC_SETXID
78151+ grsec_enable_setxid = 1;
78152+#endif
78153+#ifdef CONFIG_GRKERNSEC_SIGNAL
78154+ grsec_enable_signal = 1;
78155+#endif
78156+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78157+ grsec_enable_forkfail = 1;
78158+#endif
78159+#ifdef CONFIG_GRKERNSEC_TIME
78160+ grsec_enable_time = 1;
78161+#endif
78162+#ifdef CONFIG_GRKERNSEC_RESLOG
78163+ grsec_resource_logging = 1;
78164+#endif
78165+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78166+ grsec_enable_chroot_findtask = 1;
78167+#endif
78168+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78169+ grsec_enable_chroot_unix = 1;
78170+#endif
78171+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78172+ grsec_enable_chroot_mount = 1;
78173+#endif
78174+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78175+ grsec_enable_chroot_fchdir = 1;
78176+#endif
78177+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78178+ grsec_enable_chroot_shmat = 1;
78179+#endif
78180+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78181+ grsec_enable_audit_ptrace = 1;
78182+#endif
78183+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78184+ grsec_enable_chroot_double = 1;
78185+#endif
78186+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78187+ grsec_enable_chroot_pivot = 1;
78188+#endif
78189+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78190+ grsec_enable_chroot_chdir = 1;
78191+#endif
78192+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78193+ grsec_enable_chroot_chmod = 1;
78194+#endif
78195+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78196+ grsec_enable_chroot_mknod = 1;
78197+#endif
78198+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78199+ grsec_enable_chroot_nice = 1;
78200+#endif
78201+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78202+ grsec_enable_chroot_execlog = 1;
78203+#endif
78204+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78205+ grsec_enable_chroot_caps = 1;
78206+#endif
78207+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78208+ grsec_enable_chroot_sysctl = 1;
78209+#endif
78210+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78211+ grsec_enable_symlinkown = 1;
78212+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
78213+#endif
78214+#ifdef CONFIG_GRKERNSEC_TPE
78215+ grsec_enable_tpe = 1;
78216+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
78217+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78218+ grsec_enable_tpe_all = 1;
78219+#endif
78220+#endif
78221+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78222+ grsec_enable_socket_all = 1;
78223+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
78224+#endif
78225+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78226+ grsec_enable_socket_client = 1;
78227+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
78228+#endif
78229+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78230+ grsec_enable_socket_server = 1;
78231+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
78232+#endif
78233+#endif
78234+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
78235+ grsec_deny_new_usb = 1;
78236+#endif
78237+
78238+ return;
78239+}
78240diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
78241new file mode 100644
78242index 0000000..1773300
78243--- /dev/null
78244+++ b/grsecurity/grsec_ipc.c
78245@@ -0,0 +1,48 @@
78246+#include <linux/kernel.h>
78247+#include <linux/mm.h>
78248+#include <linux/sched.h>
78249+#include <linux/file.h>
78250+#include <linux/ipc.h>
78251+#include <linux/ipc_namespace.h>
78252+#include <linux/grsecurity.h>
78253+#include <linux/grinternal.h>
78254+
78255+int
78256+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
78257+{
78258+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78259+ int write;
78260+ int orig_granted_mode;
78261+ kuid_t euid;
78262+ kgid_t egid;
78263+
78264+ if (!grsec_enable_harden_ipc)
78265+ return 1;
78266+
78267+ euid = current_euid();
78268+ egid = current_egid();
78269+
78270+ write = requested_mode & 00002;
78271+ orig_granted_mode = ipcp->mode;
78272+
78273+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
78274+ orig_granted_mode >>= 6;
78275+ else {
78276+ /* if likely wrong permissions, lock to user */
78277+		/* if the mode looks misconfigured (any world bits set), lock access down to the owner */
78278+ orig_granted_mode = 0;
78279+		/* otherwise do an egid-only check */
78280+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
78281+ orig_granted_mode >>= 3;
78282+ /* otherwise, no access */
78283+ else
78284+ orig_granted_mode = 0;
78285+ }
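+	/* deny only when the kernel's own check passed purely through the
+	 * world bits (every requested "other" bit is in granted_mode) yet the
+	 * owner/group-locked recomputation above would refuse them, and the
+	 * caller lacks CAP_IPC_OWNER
+	 */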
78286+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
78287+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
78288+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
78289+ return 0;
78290+ }
78291+#endif
78292+ return 1;
78293+}
78294diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
78295new file mode 100644
78296index 0000000..5e05e20
78297--- /dev/null
78298+++ b/grsecurity/grsec_link.c
78299@@ -0,0 +1,58 @@
78300+#include <linux/kernel.h>
78301+#include <linux/sched.h>
78302+#include <linux/fs.h>
78303+#include <linux/file.h>
78304+#include <linux/grinternal.h>
78305+
78306+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
78307+{
78308+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78309+ const struct inode *link_inode = link->dentry->d_inode;
78310+
78311+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
78312+ /* ignore root-owned links, e.g. /proc/self */
78313+ gr_is_global_nonroot(link_inode->i_uid) && target &&
78314+ !uid_eq(link_inode->i_uid, target->i_uid)) {
78315+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
78316+ return 1;
78317+ }
78318+#endif
78319+ return 0;
78320+}
78321+
78322+int
78323+gr_handle_follow_link(const struct inode *parent,
78324+ const struct inode *inode,
78325+ const struct dentry *dentry, const struct vfsmount *mnt)
78326+{
78327+#ifdef CONFIG_GRKERNSEC_LINK
78328+ const struct cred *cred = current_cred();
78329+
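+	/* classic /tmp symlink-race hardening: refuse to follow a symlink in a
+	 * world-writable sticky directory when the link is owned by neither
+	 * the directory owner nor the process's fsuid
+	 */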
78330+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
78331+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
78332+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
78333+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
78334+ return -EACCES;
78335+ }
78336+#endif
78337+ return 0;
78338+}
78339+
78340+int
78341+gr_handle_hardlink(const struct dentry *dentry,
78342+ const struct vfsmount *mnt,
78343+ struct inode *inode, const int mode, const struct filename *to)
78344+{
78345+#ifdef CONFIG_GRKERNSEC_LINK
78346+ const struct cred *cred = current_cred();
78347+
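+	/* an unprivileged, non-owning user may hardlink only regular files
+	 * that are not privileged binaries and that he can already both read
+	 * and write; anything else requires CAP_FOWNER
+	 */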
78348+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
78349+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
78350+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
78351+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
78352+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
78353+ return -EPERM;
78354+ }
78355+#endif
78356+ return 0;
78357+}
78358diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
78359new file mode 100644
78360index 0000000..dbe0a6b
78361--- /dev/null
78362+++ b/grsecurity/grsec_log.c
78363@@ -0,0 +1,341 @@
78364+#include <linux/kernel.h>
78365+#include <linux/sched.h>
78366+#include <linux/file.h>
78367+#include <linux/tty.h>
78368+#include <linux/fs.h>
78369+#include <linux/mm.h>
78370+#include <linux/grinternal.h>
78371+
78372+#ifdef CONFIG_TREE_PREEMPT_RCU
78373+#define DISABLE_PREEMPT() preempt_disable()
78374+#define ENABLE_PREEMPT() preempt_enable()
78375+#else
78376+#define DISABLE_PREEMPT()
78377+#define ENABLE_PREEMPT()
78378+#endif
78379+
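+/* BEGIN_LOCKS/END_LOCKS nest the logging locks in a fixed order:
+ * rcu_read_lock -> tasklist_lock -> grsec_exec_file_lock -> the alert or
+ * audit spinlock, released in reverse; for GR_DONT_AUDIT denials END_LOCKS
+ * also hands the current task to gr_handle_alertkill()
+ */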
78380+#define BEGIN_LOCKS(x) \
78381+ DISABLE_PREEMPT(); \
78382+ rcu_read_lock(); \
78383+ read_lock(&tasklist_lock); \
78384+ read_lock(&grsec_exec_file_lock); \
78385+ if (x != GR_DO_AUDIT) \
78386+ spin_lock(&grsec_alert_lock); \
78387+ else \
78388+ spin_lock(&grsec_audit_lock)
78389+
78390+#define END_LOCKS(x) \
78391+ if (x != GR_DO_AUDIT) \
78392+ spin_unlock(&grsec_alert_lock); \
78393+ else \
78394+ spin_unlock(&grsec_audit_lock); \
78395+ read_unlock(&grsec_exec_file_lock); \
78396+ read_unlock(&tasklist_lock); \
78397+ rcu_read_unlock(); \
78398+ ENABLE_PREEMPT(); \
78399+ if (x == GR_DONT_AUDIT) \
78400+ gr_handle_alertkill(current)
78401+
78402+enum {
78403+ FLOODING,
78404+ NO_FLOODING
78405+};
78406+
78407+extern char *gr_alert_log_fmt;
78408+extern char *gr_audit_log_fmt;
78409+extern char *gr_alert_log_buf;
78410+extern char *gr_audit_log_buf;
78411+
78412+static int gr_log_start(int audit)
78413+{
78414+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
78415+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
78416+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78417+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
78418+ unsigned long curr_secs = get_seconds();
78419+
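+	/* simple flood control for alerts (audit messages bypass it via
+	 * set_fmt): at most CONFIG_GRKERNSEC_FLOODBURST messages per
+	 * CONFIG_GRKERNSEC_FLOODTIME-second window, e.g. 6 alerts per 10
+	 * seconds with FLOODTIME=10/FLOODBURST=6; the first suppressed alert
+	 * emits a one-line notice, the rest are dropped silently
+	 */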
78420+ if (audit == GR_DO_AUDIT)
78421+ goto set_fmt;
78422+
78423+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
78424+ grsec_alert_wtime = curr_secs;
78425+ grsec_alert_fyet = 0;
78426+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
78427+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
78428+ grsec_alert_fyet++;
78429+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
78430+ grsec_alert_wtime = curr_secs;
78431+ grsec_alert_fyet++;
78432+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
78433+ return FLOODING;
78434+	} else
78435+		return FLOODING;
78436+
78437+set_fmt:
78438+#endif
78439+ memset(buf, 0, PAGE_SIZE);
78440+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
78441+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
78442+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78443+ } else if (current->signal->curr_ip) {
78444+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
78445+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
78446+ } else if (gr_acl_is_enabled()) {
78447+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
78448+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78449+ } else {
78450+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
78451+ strcpy(buf, fmt);
78452+ }
78453+
78454+ return NO_FLOODING;
78455+}
78456+
78457+static void gr_log_middle(int audit, const char *msg, va_list ap)
78458+ __attribute__ ((format (printf, 2, 0)));
78459+
78460+static void gr_log_middle(int audit, const char *msg, va_list ap)
78461+{
78462+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78463+ unsigned int len = strlen(buf);
78464+
78465+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78466+
78467+ return;
78468+}
78469+
78470+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78471+ __attribute__ ((format (printf, 2, 3)));
78472+
78473+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78474+{
78475+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78476+ unsigned int len = strlen(buf);
78477+ va_list ap;
78478+
78479+ va_start(ap, msg);
78480+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78481+ va_end(ap);
78482+
78483+ return;
78484+}
78485+
78486+static void gr_log_end(int audit, int append_default)
78487+{
78488+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78489+ if (append_default) {
78490+ struct task_struct *task = current;
78491+ struct task_struct *parent = task->real_parent;
78492+ const struct cred *cred = __task_cred(task);
78493+ const struct cred *pcred = __task_cred(parent);
78494+ unsigned int len = strlen(buf);
78495+
78496+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78497+ }
78498+
78499+ printk("%s\n", buf);
78500+
78501+ return;
78502+}
78503+
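+/* every log line is assembled in three stages under BEGIN_LOCKS:
+ * gr_log_start() writes the loglevel plus role/source-IP prefix and applies
+ * flood control, the argtypes switch below appends the per-message body, and
+ * gr_log_end() adds the default task/credential trailer except for
+ * GR_CRASH1/GR_CRASH2, whose message bodies already carry it
+ */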
78504+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
78505+{
78506+ int logtype;
78507+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
78508+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
78509+ void *voidptr = NULL;
78510+ int num1 = 0, num2 = 0;
78511+ unsigned long ulong1 = 0, ulong2 = 0;
78512+ struct dentry *dentry = NULL;
78513+ struct vfsmount *mnt = NULL;
78514+ struct file *file = NULL;
78515+ struct task_struct *task = NULL;
78516+ struct vm_area_struct *vma = NULL;
78517+ const struct cred *cred, *pcred;
78518+ va_list ap;
78519+
78520+ BEGIN_LOCKS(audit);
78521+ logtype = gr_log_start(audit);
78522+ if (logtype == FLOODING) {
78523+ END_LOCKS(audit);
78524+ return;
78525+ }
78526+ va_start(ap, argtypes);
78527+ switch (argtypes) {
78528+ case GR_TTYSNIFF:
78529+ task = va_arg(ap, struct task_struct *);
78530+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
78531+ break;
78532+ case GR_SYSCTL_HIDDEN:
78533+ str1 = va_arg(ap, char *);
78534+ gr_log_middle_varargs(audit, msg, result, str1);
78535+ break;
78536+ case GR_RBAC:
78537+ dentry = va_arg(ap, struct dentry *);
78538+ mnt = va_arg(ap, struct vfsmount *);
78539+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
78540+ break;
78541+ case GR_RBAC_STR:
78542+ dentry = va_arg(ap, struct dentry *);
78543+ mnt = va_arg(ap, struct vfsmount *);
78544+ str1 = va_arg(ap, char *);
78545+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
78546+ break;
78547+ case GR_STR_RBAC:
78548+ str1 = va_arg(ap, char *);
78549+ dentry = va_arg(ap, struct dentry *);
78550+ mnt = va_arg(ap, struct vfsmount *);
78551+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
78552+ break;
78553+ case GR_RBAC_MODE2:
78554+ dentry = va_arg(ap, struct dentry *);
78555+ mnt = va_arg(ap, struct vfsmount *);
78556+ str1 = va_arg(ap, char *);
78557+ str2 = va_arg(ap, char *);
78558+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
78559+ break;
78560+ case GR_RBAC_MODE3:
78561+ dentry = va_arg(ap, struct dentry *);
78562+ mnt = va_arg(ap, struct vfsmount *);
78563+ str1 = va_arg(ap, char *);
78564+ str2 = va_arg(ap, char *);
78565+ str3 = va_arg(ap, char *);
78566+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
78567+ break;
78568+ case GR_FILENAME:
78569+ dentry = va_arg(ap, struct dentry *);
78570+ mnt = va_arg(ap, struct vfsmount *);
78571+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
78572+ break;
78573+ case GR_STR_FILENAME:
78574+ str1 = va_arg(ap, char *);
78575+ dentry = va_arg(ap, struct dentry *);
78576+ mnt = va_arg(ap, struct vfsmount *);
78577+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
78578+ break;
78579+ case GR_FILENAME_STR:
78580+ dentry = va_arg(ap, struct dentry *);
78581+ mnt = va_arg(ap, struct vfsmount *);
78582+ str1 = va_arg(ap, char *);
78583+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
78584+ break;
78585+ case GR_FILENAME_TWO_INT:
78586+ dentry = va_arg(ap, struct dentry *);
78587+ mnt = va_arg(ap, struct vfsmount *);
78588+ num1 = va_arg(ap, int);
78589+ num2 = va_arg(ap, int);
78590+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
78591+ break;
78592+ case GR_FILENAME_TWO_INT_STR:
78593+ dentry = va_arg(ap, struct dentry *);
78594+ mnt = va_arg(ap, struct vfsmount *);
78595+ num1 = va_arg(ap, int);
78596+ num2 = va_arg(ap, int);
78597+ str1 = va_arg(ap, char *);
78598+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
78599+ break;
78600+ case GR_TEXTREL:
78601+ file = va_arg(ap, struct file *);
78602+ ulong1 = va_arg(ap, unsigned long);
78603+ ulong2 = va_arg(ap, unsigned long);
78604+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
78605+ break;
78606+ case GR_PTRACE:
78607+ task = va_arg(ap, struct task_struct *);
78608+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
78609+ break;
78610+ case GR_RESOURCE:
78611+ task = va_arg(ap, struct task_struct *);
78612+ cred = __task_cred(task);
78613+ pcred = __task_cred(task->real_parent);
78614+ ulong1 = va_arg(ap, unsigned long);
78615+ str1 = va_arg(ap, char *);
78616+ ulong2 = va_arg(ap, unsigned long);
78617+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78618+ break;
78619+ case GR_CAP:
78620+ task = va_arg(ap, struct task_struct *);
78621+ cred = __task_cred(task);
78622+ pcred = __task_cred(task->real_parent);
78623+ str1 = va_arg(ap, char *);
78624+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78625+ break;
78626+ case GR_SIG:
78627+ str1 = va_arg(ap, char *);
78628+ voidptr = va_arg(ap, void *);
78629+ gr_log_middle_varargs(audit, msg, str1, voidptr);
78630+ break;
78631+ case GR_SIG2:
78632+ task = va_arg(ap, struct task_struct *);
78633+ cred = __task_cred(task);
78634+ pcred = __task_cred(task->real_parent);
78635+ num1 = va_arg(ap, int);
78636+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78637+ break;
78638+ case GR_CRASH1:
78639+ task = va_arg(ap, struct task_struct *);
78640+ cred = __task_cred(task);
78641+ pcred = __task_cred(task->real_parent);
78642+ ulong1 = va_arg(ap, unsigned long);
78643+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
78644+ break;
78645+ case GR_CRASH2:
78646+ task = va_arg(ap, struct task_struct *);
78647+ cred = __task_cred(task);
78648+ pcred = __task_cred(task->real_parent);
78649+ ulong1 = va_arg(ap, unsigned long);
78650+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
78651+ break;
78652+ case GR_RWXMAP:
78653+ file = va_arg(ap, struct file *);
78654+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
78655+ break;
78656+ case GR_RWXMAPVMA:
78657+ vma = va_arg(ap, struct vm_area_struct *);
78658+ if (vma->vm_file)
78659+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
78660+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
78661+ str1 = "<stack>";
78662+ else if (vma->vm_start <= current->mm->brk &&
78663+ vma->vm_end >= current->mm->start_brk)
78664+ str1 = "<heap>";
78665+ else
78666+ str1 = "<anonymous mapping>";
78667+ gr_log_middle_varargs(audit, msg, str1);
78668+ break;
78669+ case GR_PSACCT:
78670+ {
78671+ unsigned int wday, cday;
78672+ __u8 whr, chr;
78673+ __u8 wmin, cmin;
78674+ __u8 wsec, csec;
78675+ char cur_tty[64] = { 0 };
78676+ char parent_tty[64] = { 0 };
78677+
78678+ task = va_arg(ap, struct task_struct *);
78679+ wday = va_arg(ap, unsigned int);
78680+ cday = va_arg(ap, unsigned int);
78681+ whr = va_arg(ap, int);
78682+ chr = va_arg(ap, int);
78683+ wmin = va_arg(ap, int);
78684+ cmin = va_arg(ap, int);
78685+ wsec = va_arg(ap, int);
78686+ csec = va_arg(ap, int);
78687+ ulong1 = va_arg(ap, unsigned long);
78688+ cred = __task_cred(task);
78689+ pcred = __task_cred(task->real_parent);
78690+
78691+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78692+ }
78693+ break;
78694+ default:
78695+ gr_log_middle(audit, msg, ap);
78696+ }
78697+ va_end(ap);
78698+ // these don't need DEFAULTSECARGS printed on the end
78699+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
78700+ gr_log_end(audit, 0);
78701+ else
78702+ gr_log_end(audit, 1);
78703+ END_LOCKS(audit);
78704+}
78705diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
78706new file mode 100644
78707index 0000000..0e39d8c
78708--- /dev/null
78709+++ b/grsecurity/grsec_mem.c
78710@@ -0,0 +1,48 @@
78711+#include <linux/kernel.h>
78712+#include <linux/sched.h>
78713+#include <linux/mm.h>
78714+#include <linux/mman.h>
78715+#include <linux/module.h>
78716+#include <linux/grinternal.h>
78717+
78718+void gr_handle_msr_write(void)
78719+{
78720+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
78721+ return;
78722+}
78723+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
78724+
78725+void
78726+gr_handle_ioperm(void)
78727+{
78728+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
78729+ return;
78730+}
78731+
78732+void
78733+gr_handle_iopl(void)
78734+{
78735+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
78736+ return;
78737+}
78738+
78739+void
78740+gr_handle_mem_readwrite(u64 from, u64 to)
78741+{
78742+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
78743+ return;
78744+}
78745+
78746+void
78747+gr_handle_vm86(void)
78748+{
78749+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
78750+ return;
78751+}
78752+
78753+void
78754+gr_log_badprocpid(const char *entry)
78755+{
78756+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
78757+ return;
78758+}
78759diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
78760new file mode 100644
78761index 0000000..cd9e124
78762--- /dev/null
78763+++ b/grsecurity/grsec_mount.c
78764@@ -0,0 +1,65 @@
78765+#include <linux/kernel.h>
78766+#include <linux/sched.h>
78767+#include <linux/mount.h>
78768+#include <linux/major.h>
78769+#include <linux/grsecurity.h>
78770+#include <linux/grinternal.h>
78771+
78772+void
78773+gr_log_remount(const char *devname, const int retval)
78774+{
78775+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78776+ if (grsec_enable_mount && (retval >= 0))
78777+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
78778+#endif
78779+ return;
78780+}
78781+
78782+void
78783+gr_log_unmount(const char *devname, const int retval)
78784+{
78785+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78786+ if (grsec_enable_mount && (retval >= 0))
78787+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
78788+#endif
78789+ return;
78790+}
78791+
78792+void
78793+gr_log_mount(const char *from, const char *to, const int retval)
78794+{
78795+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78796+ if (grsec_enable_mount && (retval >= 0))
78797+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
78798+#endif
78799+ return;
78800+}
78801+
78802+int
78803+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
78804+{
78805+#ifdef CONFIG_GRKERNSEC_ROFS
78806+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
78807+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
78808+ return -EPERM;
78809+ } else
78810+ return 0;
78811+#endif
78812+ return 0;
78813+}
78814+
78815+int
78816+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
78817+{
78818+#ifdef CONFIG_GRKERNSEC_ROFS
78819+ struct inode *inode = dentry->d_inode;
78820+
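+	/* with the rofs sysctl active, refuse write access to block devices
+	 * and to raw char devices (RAW_MAJOR), so a system mounted read-only
+	 * cannot be modified from underneath the filesystem layer
+	 */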
78821+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
78822+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
78823+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
78824+ return -EPERM;
78825+ } else
78826+ return 0;
78827+#endif
78828+ return 0;
78829+}
78830diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
78831new file mode 100644
78832index 0000000..6ee9d50
78833--- /dev/null
78834+++ b/grsecurity/grsec_pax.c
78835@@ -0,0 +1,45 @@
78836+#include <linux/kernel.h>
78837+#include <linux/sched.h>
78838+#include <linux/mm.h>
78839+#include <linux/file.h>
78840+#include <linux/grinternal.h>
78841+#include <linux/grsecurity.h>
78842+
78843+void
78844+gr_log_textrel(struct vm_area_struct * vma)
78845+{
78846+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78847+ if (grsec_enable_log_rwxmaps)
78848+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78849+#endif
78850+ return;
78851+}
78852+
78853+void gr_log_ptgnustack(struct file *file)
78854+{
78855+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78856+ if (grsec_enable_log_rwxmaps)
78857+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78858+#endif
78859+ return;
78860+}
78861+
78862+void
78863+gr_log_rwxmmap(struct file *file)
78864+{
78865+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78866+ if (grsec_enable_log_rwxmaps)
78867+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78868+#endif
78869+ return;
78870+}
78871+
78872+void
78873+gr_log_rwxmprotect(struct vm_area_struct *vma)
78874+{
78875+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78876+ if (grsec_enable_log_rwxmaps)
78877+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78878+#endif
78879+ return;
78880+}
78881diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78882new file mode 100644
78883index 0000000..2005a3a
78884--- /dev/null
78885+++ b/grsecurity/grsec_proc.c
78886@@ -0,0 +1,20 @@
78887+#include <linux/kernel.h>
78888+#include <linux/sched.h>
78889+#include <linux/grsecurity.h>
78890+#include <linux/grinternal.h>
78891+
78892+int gr_proc_is_restricted(void)
78893+{
78894+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78895+ const struct cred *cred = current_cred();
78896+#endif
78897+
78898+#ifdef CONFIG_GRKERNSEC_PROC_USER
78899+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78900+ return -EACCES;
78901+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78902+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78903+ return -EACCES;
78904+#endif
78905+ return 0;
78906+}
78907diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78908new file mode 100644
78909index 0000000..f7f29aa
78910--- /dev/null
78911+++ b/grsecurity/grsec_ptrace.c
78912@@ -0,0 +1,30 @@
78913+#include <linux/kernel.h>
78914+#include <linux/sched.h>
78915+#include <linux/grinternal.h>
78916+#include <linux/security.h>
78917+
78918+void
78919+gr_audit_ptrace(struct task_struct *task)
78920+{
78921+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78922+ if (grsec_enable_audit_ptrace)
78923+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78924+#endif
78925+ return;
78926+}
78927+
78928+int
78929+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78930+{
78931+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78932+ const struct dentry *dentry = file->f_path.dentry;
78933+ const struct vfsmount *mnt = file->f_path.mnt;
78934+
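+	/* when a traced task execs (LSM_UNSAFE_PTRACE), require that it can
+	 * read the new image under both DAC and RBAC policy; otherwise the
+	 * exec is denied, preventing a tracer from using ptrace to dump a
+	 * binary it could execute but not read
+	 */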
78935+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78936+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78937+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78938+ return -EACCES;
78939+ }
78940+#endif
78941+ return 0;
78942+}
78943diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78944new file mode 100644
78945index 0000000..3860c7e
78946--- /dev/null
78947+++ b/grsecurity/grsec_sig.c
78948@@ -0,0 +1,236 @@
78949+#include <linux/kernel.h>
78950+#include <linux/sched.h>
78951+#include <linux/fs.h>
78952+#include <linux/delay.h>
78953+#include <linux/grsecurity.h>
78954+#include <linux/grinternal.h>
78955+#include <linux/hardirq.h>
78956+
78957+char *signames[] = {
78958+ [SIGSEGV] = "Segmentation fault",
78959+ [SIGILL] = "Illegal instruction",
78960+ [SIGABRT] = "Abort",
78961+ [SIGBUS] = "Invalid alignment/Bus error"
78962+};
78963+
78964+void
78965+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78966+{
78967+#ifdef CONFIG_GRKERNSEC_SIGNAL
78968+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78969+ (sig == SIGABRT) || (sig == SIGBUS))) {
78970+ if (task_pid_nr(t) == task_pid_nr(current)) {
78971+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78972+ } else {
78973+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78974+ }
78975+ }
78976+#endif
78977+ return;
78978+}
78979+
78980+int
78981+gr_handle_signal(const struct task_struct *p, const int sig)
78982+{
78983+#ifdef CONFIG_GRKERNSEC
78984+ /* ignore the 0 signal for protected task checks */
78985+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78986+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78987+ return -EPERM;
78988+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78989+ return -EPERM;
78990+ }
78991+#endif
78992+ return 0;
78993+}
78994+
78995+#ifdef CONFIG_GRKERNSEC
78996+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78997+
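+/* deliver sig to t even if t blocks or ignores it: the handler is reset to
+ * SIG_DFL, the signal is unblocked, and SIGNAL_UNKILLABLE is stripped so the
+ * forced signal cannot be survived
+ */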
78998+int gr_fake_force_sig(int sig, struct task_struct *t)
78999+{
79000+ unsigned long int flags;
79001+ int ret, blocked, ignored;
79002+ struct k_sigaction *action;
79003+
79004+ spin_lock_irqsave(&t->sighand->siglock, flags);
79005+ action = &t->sighand->action[sig-1];
79006+ ignored = action->sa.sa_handler == SIG_IGN;
79007+ blocked = sigismember(&t->blocked, sig);
79008+ if (blocked || ignored) {
79009+ action->sa.sa_handler = SIG_DFL;
79010+ if (blocked) {
79011+ sigdelset(&t->blocked, sig);
79012+ recalc_sigpending_and_wake(t);
79013+ }
79014+ }
79015+ if (action->sa.sa_handler == SIG_DFL)
79016+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
79017+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
79018+
79019+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
79020+
79021+ return ret;
79022+}
79023+#endif
79024+
79025+#define GR_USER_BAN_TIME (15 * 60)
79026+#define GR_DAEMON_BRUTE_TIME (30 * 60)
79027+
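+/* when a privileged (non-dumpable) process crashes, the offending user is
+ * banned from executing setuid/setgid binaries for 15 minutes; if the
+ * crashed task was forked from a parent running the same binary, the parent
+ * is instead marked "brute" for 30 minutes and gr_handle_brute_check() below
+ * stalls it for 30 seconds at a time while that window is open
+ */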
79028+void gr_handle_brute_attach(int dumpable)
79029+{
79030+#ifdef CONFIG_GRKERNSEC_BRUTE
79031+ struct task_struct *p = current;
79032+ kuid_t uid = GLOBAL_ROOT_UID;
79033+ int daemon = 0;
79034+
79035+ if (!grsec_enable_brute)
79036+ return;
79037+
79038+ rcu_read_lock();
79039+ read_lock(&tasklist_lock);
79040+ read_lock(&grsec_exec_file_lock);
79041+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
79042+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
79043+ p->real_parent->brute = 1;
79044+ daemon = 1;
79045+ } else {
79046+ const struct cred *cred = __task_cred(p), *cred2;
79047+ struct task_struct *tsk, *tsk2;
79048+
79049+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
79050+ struct user_struct *user;
79051+
79052+ uid = cred->uid;
79053+
79054+			/* this reference is put in suid_ban_expired() once the ban lapses and the user execs again */
79055+ user = find_user(uid);
79056+ if (user == NULL)
79057+ goto unlock;
79058+ user->suid_banned = 1;
79059+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
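+			/* ~0UL doubles as the "never expires" sentinel tested in
+			 * suid_ban_expired(), so step off it if we land there
+			 */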
79060+ if (user->suid_ban_expires == ~0UL)
79061+ user->suid_ban_expires--;
79062+
79063+ /* only kill other threads of the same binary, from the same user */
79064+ do_each_thread(tsk2, tsk) {
79065+ cred2 = __task_cred(tsk);
79066+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
79067+ gr_fake_force_sig(SIGKILL, tsk);
79068+ } while_each_thread(tsk2, tsk);
79069+ }
79070+ }
79071+unlock:
79072+ read_unlock(&grsec_exec_file_lock);
79073+ read_unlock(&tasklist_lock);
79074+ rcu_read_unlock();
79075+
79076+ if (gr_is_global_nonroot(uid))
79077+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
79078+ else if (daemon)
79079+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
79080+
79081+#endif
79082+ return;
79083+}
79084+
79085+void gr_handle_brute_check(void)
79086+{
79087+#ifdef CONFIG_GRKERNSEC_BRUTE
79088+ struct task_struct *p = current;
79089+
79090+ if (unlikely(p->brute)) {
79091+ if (!grsec_enable_brute)
79092+ p->brute = 0;
79093+ else if (time_before(get_seconds(), p->brute_expires))
79094+ msleep(30 * 1000);
79095+ }
79096+#endif
79097+ return;
79098+}
79099+
79100+void gr_handle_kernel_exploit(void)
79101+{
79102+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79103+ const struct cred *cred;
79104+ struct task_struct *tsk, *tsk2;
79105+ struct user_struct *user;
79106+ kuid_t uid;
79107+
79108+ if (in_irq() || in_serving_softirq() || in_nmi())
79109+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
79110+
79111+ uid = current_uid();
79112+
79113+ if (gr_is_global_root(uid))
79114+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
79115+ else {
79116+ /* kill all the processes of this user, hold a reference
79117+ to their creds struct, and prevent them from creating
79118+ another process until system reset
79119+ */
79120+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
79121+ GR_GLOBAL_UID(uid));
79122+ /* we intentionally leak this ref */
79123+ user = get_uid(current->cred->user);
79124+ if (user)
79125+ user->kernel_banned = 1;
79126+
79127+ /* kill all processes of this user */
79128+ read_lock(&tasklist_lock);
79129+ do_each_thread(tsk2, tsk) {
79130+ cred = __task_cred(tsk);
79131+ if (uid_eq(cred->uid, uid))
79132+ gr_fake_force_sig(SIGKILL, tsk);
79133+ } while_each_thread(tsk2, tsk);
79134+ read_unlock(&tasklist_lock);
79135+ }
79136+#endif
79137+}
79138+
79139+#ifdef CONFIG_GRKERNSEC_BRUTE
79140+static bool suid_ban_expired(struct user_struct *user)
79141+{
79142+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
79143+ user->suid_banned = 0;
79144+ user->suid_ban_expires = 0;
79145+ free_uid(user);
79146+ return true;
79147+ }
79148+
79149+ return false;
79150+}
79151+#endif
79152+
79153+int gr_process_kernel_exec_ban(void)
79154+{
79155+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79156+ if (unlikely(current->cred->user->kernel_banned))
79157+ return -EPERM;
79158+#endif
79159+ return 0;
79160+}
79161+
79162+int gr_process_kernel_setuid_ban(struct user_struct *user)
79163+{
79164+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79165+ if (unlikely(user->kernel_banned))
79166+ gr_fake_force_sig(SIGKILL, current);
79167+#endif
79168+ return 0;
79169+}
79170+
79171+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
79172+{
79173+#ifdef CONFIG_GRKERNSEC_BRUTE
79174+ struct user_struct *user = current->cred->user;
79175+ if (unlikely(user->suid_banned)) {
79176+ if (suid_ban_expired(user))
79177+ return 0;
79178+ /* disallow execution of suid binaries only */
79179+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
79180+ return -EPERM;
79181+ }
79182+#endif
79183+ return 0;
79184+}
79185diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
79186new file mode 100644
79187index 0000000..c0aef3a
79188--- /dev/null
79189+++ b/grsecurity/grsec_sock.c
79190@@ -0,0 +1,244 @@
79191+#include <linux/kernel.h>
79192+#include <linux/module.h>
79193+#include <linux/sched.h>
79194+#include <linux/file.h>
79195+#include <linux/net.h>
79196+#include <linux/in.h>
79197+#include <linux/ip.h>
79198+#include <net/sock.h>
79199+#include <net/inet_sock.h>
79200+#include <linux/grsecurity.h>
79201+#include <linux/grinternal.h>
79202+#include <linux/gracl.h>
79203+
79204+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
79205+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
79206+
79207+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
79208+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
79209+
79210+#ifdef CONFIG_UNIX_MODULE
79211+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
79212+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
79213+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
79214+EXPORT_SYMBOL_GPL(gr_handle_create);
79215+#endif
79216+
79217+#ifdef CONFIG_GRKERNSEC
79218+#define gr_conn_table_size 32749
79219+struct conn_table_entry {
79220+ struct conn_table_entry *next;
79221+ struct signal_struct *sig;
79222+};
79223+
79224+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
79225+DEFINE_SPINLOCK(gr_conn_table_lock);
79226+
79227+extern const char * gr_socktype_to_name(unsigned char type);
79228+extern const char * gr_proto_to_name(unsigned char proto);
79229+extern const char * gr_sockfamily_to_name(unsigned char family);
79230+
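+/* a small chained hash table (32749 is prime) mapping a TCP 4-tuple to the
+ * signal_struct of the local task that owns the connection, so accept()ed
+ * connections can later be attributed to a source IP in log messages
+ */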
79231+static __inline__ int
79232+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
79233+{
79234+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
79235+}
79236+
79237+static __inline__ int
79238+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
79239+ __u16 sport, __u16 dport)
79240+{
79241+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
79242+ sig->gr_sport == sport && sig->gr_dport == dport))
79243+ return 1;
79244+ else
79245+ return 0;
79246+}
79247+
79248+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
79249+{
79250+ struct conn_table_entry **match;
79251+ unsigned int index;
79252+
79253+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79254+ sig->gr_sport, sig->gr_dport,
79255+ gr_conn_table_size);
79256+
79257+ newent->sig = sig;
79258+
79259+ match = &gr_conn_table[index];
79260+ newent->next = *match;
79261+ *match = newent;
79262+
79263+ return;
79264+}
79265+
79266+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
79267+{
79268+ struct conn_table_entry *match, *last = NULL;
79269+ unsigned int index;
79270+
79271+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79272+ sig->gr_sport, sig->gr_dport,
79273+ gr_conn_table_size);
79274+
79275+ match = gr_conn_table[index];
79276+ while (match && !conn_match(match->sig,
79277+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
79278+ sig->gr_dport)) {
79279+ last = match;
79280+ match = match->next;
79281+ }
79282+
79283+ if (match) {
79284+ if (last)
79285+ last->next = match->next;
79286+ else
79287+ gr_conn_table[index] = NULL;
79288+ kfree(match);
79289+ }
79290+
79291+ return;
79292+}
79293+
79294+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
79295+ __u16 sport, __u16 dport)
79296+{
79297+ struct conn_table_entry *match;
79298+ unsigned int index;
79299+
79300+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
79301+
79302+ match = gr_conn_table[index];
79303+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
79304+ match = match->next;
79305+
79306+ if (match)
79307+ return match->sig;
79308+ else
79309+ return NULL;
79310+}
79311+
79312+#endif
79313+
79314+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
79315+{
79316+#ifdef CONFIG_GRKERNSEC
79317+ struct signal_struct *sig = task->signal;
79318+ struct conn_table_entry *newent;
79319+
79320+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
79321+ if (newent == NULL)
79322+ return;
79323+ /* no bh lock needed since we are called with bh disabled */
79324+ spin_lock(&gr_conn_table_lock);
79325+ gr_del_task_from_ip_table_nolock(sig);
79326+ sig->gr_saddr = inet->inet_rcv_saddr;
79327+ sig->gr_daddr = inet->inet_daddr;
79328+ sig->gr_sport = inet->inet_sport;
79329+ sig->gr_dport = inet->inet_dport;
79330+ gr_add_to_task_ip_table_nolock(sig, newent);
79331+ spin_unlock(&gr_conn_table_lock);
79332+#endif
79333+ return;
79334+}
79335+
79336+void gr_del_task_from_ip_table(struct task_struct *task)
79337+{
79338+#ifdef CONFIG_GRKERNSEC
79339+ spin_lock_bh(&gr_conn_table_lock);
79340+ gr_del_task_from_ip_table_nolock(task->signal);
79341+ spin_unlock_bh(&gr_conn_table_lock);
79342+#endif
79343+ return;
79344+}
79345+
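+/* invoked on the accept path: remember the peer's address in current's
+ * signal_struct so later alerts can name the originating IP; if the peer is
+ * a local task already present in the table, inherit its recorded curr_ip
+ * instead of the connection's own source address
+ */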
79346+void
79347+gr_attach_curr_ip(const struct sock *sk)
79348+{
79349+#ifdef CONFIG_GRKERNSEC
79350+ struct signal_struct *p, *set;
79351+ const struct inet_sock *inet = inet_sk(sk);
79352+
79353+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
79354+ return;
79355+
79356+ set = current->signal;
79357+
79358+ spin_lock_bh(&gr_conn_table_lock);
79359+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
79360+ inet->inet_dport, inet->inet_sport);
79361+ if (unlikely(p != NULL)) {
79362+ set->curr_ip = p->curr_ip;
79363+ set->used_accept = 1;
79364+ gr_del_task_from_ip_table_nolock(p);
79365+ spin_unlock_bh(&gr_conn_table_lock);
79366+ return;
79367+ }
79368+ spin_unlock_bh(&gr_conn_table_lock);
79369+
79370+ set->curr_ip = inet->inet_daddr;
79371+ set->used_accept = 1;
79372+#endif
79373+ return;
79374+}
79375+
79376+int
79377+gr_handle_sock_all(const int family, const int type, const int protocol)
79378+{
79379+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79380+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
79381+ (family != AF_UNIX)) {
79382+ if (family == AF_INET)
79383+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
79384+ else
79385+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
79386+ return -EACCES;
79387+ }
79388+#endif
79389+ return 0;
79390+}
79391+
79392+int
79393+gr_handle_sock_server(const struct sockaddr *sck)
79394+{
79395+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79396+ if (grsec_enable_socket_server &&
79397+ in_group_p(grsec_socket_server_gid) &&
79398+ sck && (sck->sa_family != AF_UNIX) &&
79399+ (sck->sa_family != AF_LOCAL)) {
79400+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79401+ return -EACCES;
79402+ }
79403+#endif
79404+ return 0;
79405+}
79406+
79407+int
79408+gr_handle_sock_server_other(const struct sock *sck)
79409+{
79410+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79411+ if (grsec_enable_socket_server &&
79412+ in_group_p(grsec_socket_server_gid) &&
79413+ sck && (sck->sk_family != AF_UNIX) &&
79414+ (sck->sk_family != AF_LOCAL)) {
79415+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79416+ return -EACCES;
79417+ }
79418+#endif
79419+ return 0;
79420+}
79421+
79422+int
79423+gr_handle_sock_client(const struct sockaddr *sck)
79424+{
79425+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79426+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
79427+ sck && (sck->sa_family != AF_UNIX) &&
79428+ (sck->sa_family != AF_LOCAL)) {
79429+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
79430+ return -EACCES;
79431+ }
79432+#endif
79433+ return 0;
79434+}
79435diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
79436new file mode 100644
79437index 0000000..8159888
79438--- /dev/null
79439+++ b/grsecurity/grsec_sysctl.c
79440@@ -0,0 +1,479 @@
79441+#include <linux/kernel.h>
79442+#include <linux/sched.h>
79443+#include <linux/sysctl.h>
79444+#include <linux/grsecurity.h>
79445+#include <linux/grinternal.h>
79446+
79447+int
79448+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
79449+{
79450+#ifdef CONFIG_GRKERNSEC_SYSCTL
79451+ if (dirname == NULL || name == NULL)
79452+ return 0;
79453+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
79454+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
79455+ return -EACCES;
79456+ }
79457+#endif
79458+ return 0;
79459+}
79460+
79461+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
79462+static int __maybe_unused __read_only one = 1;
79463+#endif
79464+
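+/* runtime toggles for the features initialized in grsec_init.c; the table
+ * is registered elsewhere in this patch under kernel/grsecurity, every
+ * entry is root-only (mode 0600), and once the grsec_lock sysctl is set,
+ * gr_handle_sysctl_mod() above rejects further writes to the directory
+ */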
79465+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
79466+ defined(CONFIG_GRKERNSEC_DENYUSB)
79467+struct ctl_table grsecurity_table[] = {
79468+#ifdef CONFIG_GRKERNSEC_SYSCTL
79469+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
79470+#ifdef CONFIG_GRKERNSEC_IO
79471+ {
79472+ .procname = "disable_priv_io",
79473+ .data = &grsec_disable_privio,
79474+ .maxlen = sizeof(int),
79475+ .mode = 0600,
79476+ .proc_handler = &proc_dointvec,
79477+ },
79478+#endif
79479+#endif
79480+#ifdef CONFIG_GRKERNSEC_LINK
79481+ {
79482+ .procname = "linking_restrictions",
79483+ .data = &grsec_enable_link,
79484+ .maxlen = sizeof(int),
79485+ .mode = 0600,
79486+ .proc_handler = &proc_dointvec,
79487+ },
79488+#endif
79489+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
79490+ {
79491+ .procname = "enforce_symlinksifowner",
79492+ .data = &grsec_enable_symlinkown,
79493+ .maxlen = sizeof(int),
79494+ .mode = 0600,
79495+ .proc_handler = &proc_dointvec,
79496+ },
79497+ {
79498+ .procname = "symlinkown_gid",
79499+ .data = &grsec_symlinkown_gid,
79500+ .maxlen = sizeof(int),
79501+ .mode = 0600,
79502+ .proc_handler = &proc_dointvec,
79503+ },
79504+#endif
79505+#ifdef CONFIG_GRKERNSEC_BRUTE
79506+ {
79507+ .procname = "deter_bruteforce",
79508+ .data = &grsec_enable_brute,
79509+ .maxlen = sizeof(int),
79510+ .mode = 0600,
79511+ .proc_handler = &proc_dointvec,
79512+ },
79513+#endif
79514+#ifdef CONFIG_GRKERNSEC_FIFO
79515+ {
79516+ .procname = "fifo_restrictions",
79517+ .data = &grsec_enable_fifo,
79518+ .maxlen = sizeof(int),
79519+ .mode = 0600,
79520+ .proc_handler = &proc_dointvec,
79521+ },
79522+#endif
79523+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79524+ {
79525+ .procname = "ptrace_readexec",
79526+ .data = &grsec_enable_ptrace_readexec,
79527+ .maxlen = sizeof(int),
79528+ .mode = 0600,
79529+ .proc_handler = &proc_dointvec,
79530+ },
79531+#endif
79532+#ifdef CONFIG_GRKERNSEC_SETXID
79533+ {
79534+ .procname = "consistent_setxid",
79535+ .data = &grsec_enable_setxid,
79536+ .maxlen = sizeof(int),
79537+ .mode = 0600,
79538+ .proc_handler = &proc_dointvec,
79539+ },
79540+#endif
79541+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79542+ {
79543+ .procname = "ip_blackhole",
79544+ .data = &grsec_enable_blackhole,
79545+ .maxlen = sizeof(int),
79546+ .mode = 0600,
79547+ .proc_handler = &proc_dointvec,
79548+ },
79549+ {
79550+ .procname = "lastack_retries",
79551+ .data = &grsec_lastack_retries,
79552+ .maxlen = sizeof(int),
79553+ .mode = 0600,
79554+ .proc_handler = &proc_dointvec,
79555+ },
79556+#endif
79557+#ifdef CONFIG_GRKERNSEC_EXECLOG
79558+ {
79559+ .procname = "exec_logging",
79560+ .data = &grsec_enable_execlog,
79561+ .maxlen = sizeof(int),
79562+ .mode = 0600,
79563+ .proc_handler = &proc_dointvec,
79564+ },
79565+#endif
79566+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79567+ {
79568+ .procname = "rwxmap_logging",
79569+ .data = &grsec_enable_log_rwxmaps,
79570+ .maxlen = sizeof(int),
79571+ .mode = 0600,
79572+ .proc_handler = &proc_dointvec,
79573+ },
79574+#endif
79575+#ifdef CONFIG_GRKERNSEC_SIGNAL
79576+ {
79577+ .procname = "signal_logging",
79578+ .data = &grsec_enable_signal,
79579+ .maxlen = sizeof(int),
79580+ .mode = 0600,
79581+ .proc_handler = &proc_dointvec,
79582+ },
79583+#endif
79584+#ifdef CONFIG_GRKERNSEC_FORKFAIL
79585+ {
79586+ .procname = "forkfail_logging",
79587+ .data = &grsec_enable_forkfail,
79588+ .maxlen = sizeof(int),
79589+ .mode = 0600,
79590+ .proc_handler = &proc_dointvec,
79591+ },
79592+#endif
79593+#ifdef CONFIG_GRKERNSEC_TIME
79594+ {
79595+ .procname = "timechange_logging",
79596+ .data = &grsec_enable_time,
79597+ .maxlen = sizeof(int),
79598+ .mode = 0600,
79599+ .proc_handler = &proc_dointvec,
79600+ },
79601+#endif
79602+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
79603+ {
79604+ .procname = "chroot_deny_shmat",
79605+ .data = &grsec_enable_chroot_shmat,
79606+ .maxlen = sizeof(int),
79607+ .mode = 0600,
79608+ .proc_handler = &proc_dointvec,
79609+ },
79610+#endif
79611+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
79612+ {
79613+ .procname = "chroot_deny_unix",
79614+ .data = &grsec_enable_chroot_unix,
79615+ .maxlen = sizeof(int),
79616+ .mode = 0600,
79617+ .proc_handler = &proc_dointvec,
79618+ },
79619+#endif
79620+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
79621+ {
79622+ .procname = "chroot_deny_mount",
79623+ .data = &grsec_enable_chroot_mount,
79624+ .maxlen = sizeof(int),
79625+ .mode = 0600,
79626+ .proc_handler = &proc_dointvec,
79627+ },
79628+#endif
79629+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
79630+ {
79631+ .procname = "chroot_deny_fchdir",
79632+ .data = &grsec_enable_chroot_fchdir,
79633+ .maxlen = sizeof(int),
79634+ .mode = 0600,
79635+ .proc_handler = &proc_dointvec,
79636+ },
79637+#endif
79638+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
79639+ {
79640+ .procname = "chroot_deny_chroot",
79641+ .data = &grsec_enable_chroot_double,
79642+ .maxlen = sizeof(int),
79643+ .mode = 0600,
79644+ .proc_handler = &proc_dointvec,
79645+ },
79646+#endif
79647+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
79648+ {
79649+ .procname = "chroot_deny_pivot",
79650+ .data = &grsec_enable_chroot_pivot,
79651+ .maxlen = sizeof(int),
79652+ .mode = 0600,
79653+ .proc_handler = &proc_dointvec,
79654+ },
79655+#endif
79656+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
79657+ {
79658+ .procname = "chroot_enforce_chdir",
79659+ .data = &grsec_enable_chroot_chdir,
79660+ .maxlen = sizeof(int),
79661+ .mode = 0600,
79662+ .proc_handler = &proc_dointvec,
79663+ },
79664+#endif
79665+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
79666+ {
79667+ .procname = "chroot_deny_chmod",
79668+ .data = &grsec_enable_chroot_chmod,
79669+ .maxlen = sizeof(int),
79670+ .mode = 0600,
79671+ .proc_handler = &proc_dointvec,
79672+ },
79673+#endif
79674+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
79675+ {
79676+ .procname = "chroot_deny_mknod",
79677+ .data = &grsec_enable_chroot_mknod,
79678+ .maxlen = sizeof(int),
79679+ .mode = 0600,
79680+ .proc_handler = &proc_dointvec,
79681+ },
79682+#endif
79683+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
79684+ {
79685+ .procname = "chroot_restrict_nice",
79686+ .data = &grsec_enable_chroot_nice,
79687+ .maxlen = sizeof(int),
79688+ .mode = 0600,
79689+ .proc_handler = &proc_dointvec,
79690+ },
79691+#endif
79692+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
79693+ {
79694+ .procname = "chroot_execlog",
79695+ .data = &grsec_enable_chroot_execlog,
79696+ .maxlen = sizeof(int),
79697+ .mode = 0600,
79698+ .proc_handler = &proc_dointvec,
79699+ },
79700+#endif
79701+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
79702+ {
79703+ .procname = "chroot_caps",
79704+ .data = &grsec_enable_chroot_caps,
79705+ .maxlen = sizeof(int),
79706+ .mode = 0600,
79707+ .proc_handler = &proc_dointvec,
79708+ },
79709+#endif
79710+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
79711+ {
79712+ .procname = "chroot_deny_sysctl",
79713+ .data = &grsec_enable_chroot_sysctl,
79714+ .maxlen = sizeof(int),
79715+ .mode = 0600,
79716+ .proc_handler = &proc_dointvec,
79717+ },
79718+#endif
79719+#ifdef CONFIG_GRKERNSEC_TPE
79720+ {
79721+ .procname = "tpe",
79722+ .data = &grsec_enable_tpe,
79723+ .maxlen = sizeof(int),
79724+ .mode = 0600,
79725+ .proc_handler = &proc_dointvec,
79726+ },
79727+ {
79728+ .procname = "tpe_gid",
79729+ .data = &grsec_tpe_gid,
79730+ .maxlen = sizeof(int),
79731+ .mode = 0600,
79732+ .proc_handler = &proc_dointvec,
79733+ },
79734+#endif
79735+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79736+ {
79737+ .procname = "tpe_invert",
79738+ .data = &grsec_enable_tpe_invert,
79739+ .maxlen = sizeof(int),
79740+ .mode = 0600,
79741+ .proc_handler = &proc_dointvec,
79742+ },
79743+#endif
79744+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79745+ {
79746+ .procname = "tpe_restrict_all",
79747+ .data = &grsec_enable_tpe_all,
79748+ .maxlen = sizeof(int),
79749+ .mode = 0600,
79750+ .proc_handler = &proc_dointvec,
79751+ },
79752+#endif
79753+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79754+ {
79755+ .procname = "socket_all",
79756+ .data = &grsec_enable_socket_all,
79757+ .maxlen = sizeof(int),
79758+ .mode = 0600,
79759+ .proc_handler = &proc_dointvec,
79760+ },
79761+ {
79762+ .procname = "socket_all_gid",
79763+ .data = &grsec_socket_all_gid,
79764+ .maxlen = sizeof(int),
79765+ .mode = 0600,
79766+ .proc_handler = &proc_dointvec,
79767+ },
79768+#endif
79769+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79770+ {
79771+ .procname = "socket_client",
79772+ .data = &grsec_enable_socket_client,
79773+ .maxlen = sizeof(int),
79774+ .mode = 0600,
79775+ .proc_handler = &proc_dointvec,
79776+ },
79777+ {
79778+ .procname = "socket_client_gid",
79779+ .data = &grsec_socket_client_gid,
79780+ .maxlen = sizeof(int),
79781+ .mode = 0600,
79782+ .proc_handler = &proc_dointvec,
79783+ },
79784+#endif
79785+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79786+ {
79787+ .procname = "socket_server",
79788+ .data = &grsec_enable_socket_server,
79789+ .maxlen = sizeof(int),
79790+ .mode = 0600,
79791+ .proc_handler = &proc_dointvec,
79792+ },
79793+ {
79794+ .procname = "socket_server_gid",
79795+ .data = &grsec_socket_server_gid,
79796+ .maxlen = sizeof(int),
79797+ .mode = 0600,
79798+ .proc_handler = &proc_dointvec,
79799+ },
79800+#endif
79801+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
79802+ {
79803+ .procname = "audit_group",
79804+ .data = &grsec_enable_group,
79805+ .maxlen = sizeof(int),
79806+ .mode = 0600,
79807+ .proc_handler = &proc_dointvec,
79808+ },
79809+ {
79810+ .procname = "audit_gid",
79811+ .data = &grsec_audit_gid,
79812+ .maxlen = sizeof(int),
79813+ .mode = 0600,
79814+ .proc_handler = &proc_dointvec,
79815+ },
79816+#endif
79817+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
79818+ {
79819+ .procname = "audit_chdir",
79820+ .data = &grsec_enable_chdir,
79821+ .maxlen = sizeof(int),
79822+ .mode = 0600,
79823+ .proc_handler = &proc_dointvec,
79824+ },
79825+#endif
79826+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79827+ {
79828+ .procname = "audit_mount",
79829+ .data = &grsec_enable_mount,
79830+ .maxlen = sizeof(int),
79831+ .mode = 0600,
79832+ .proc_handler = &proc_dointvec,
79833+ },
79834+#endif
79835+#ifdef CONFIG_GRKERNSEC_DMESG
79836+ {
79837+ .procname = "dmesg",
79838+ .data = &grsec_enable_dmesg,
79839+ .maxlen = sizeof(int),
79840+ .mode = 0600,
79841+ .proc_handler = &proc_dointvec,
79842+ },
79843+#endif
79844+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79845+ {
79846+ .procname = "chroot_findtask",
79847+ .data = &grsec_enable_chroot_findtask,
79848+ .maxlen = sizeof(int),
79849+ .mode = 0600,
79850+ .proc_handler = &proc_dointvec,
79851+ },
79852+#endif
79853+#ifdef CONFIG_GRKERNSEC_RESLOG
79854+ {
79855+ .procname = "resource_logging",
79856+ .data = &grsec_resource_logging,
79857+ .maxlen = sizeof(int),
79858+ .mode = 0600,
79859+ .proc_handler = &proc_dointvec,
79860+ },
79861+#endif
79862+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79863+ {
79864+ .procname = "audit_ptrace",
79865+ .data = &grsec_enable_audit_ptrace,
79866+ .maxlen = sizeof(int),
79867+ .mode = 0600,
79868+ .proc_handler = &proc_dointvec,
79869+ },
79870+#endif
79871+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79872+ {
79873+ .procname = "harden_ptrace",
79874+ .data = &grsec_enable_harden_ptrace,
79875+ .maxlen = sizeof(int),
79876+ .mode = 0600,
79877+ .proc_handler = &proc_dointvec,
79878+ },
79879+#endif
79880+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79881+ {
79882+ .procname = "harden_ipc",
79883+ .data = &grsec_enable_harden_ipc,
79884+ .maxlen = sizeof(int),
79885+ .mode = 0600,
79886+ .proc_handler = &proc_dointvec,
79887+ },
79888+#endif
79889+ {
79890+ .procname = "grsec_lock",
79891+ .data = &grsec_lock,
79892+ .maxlen = sizeof(int),
79893+ .mode = 0600,
79894+ .proc_handler = &proc_dointvec,
79895+ },
79896+#endif
79897+#ifdef CONFIG_GRKERNSEC_ROFS
79898+ {
79899+ .procname = "romount_protect",
79900+ .data = &grsec_enable_rofs,
79901+ .maxlen = sizeof(int),
79902+ .mode = 0600,
79903+ .proc_handler = &proc_dointvec_minmax,
79904+ .extra1 = &one,
79905+ .extra2 = &one,
79906+ },
79907+#endif
79908+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79909+ {
79910+ .procname = "deny_new_usb",
79911+ .data = &grsec_deny_new_usb,
79912+ .maxlen = sizeof(int),
79913+ .mode = 0600,
79914+ .proc_handler = &proc_dointvec,
79915+ },
79916+#endif
79917+ { }
79918+};
79919+#endif
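Every entry in the table above follows one template: an int flag, readable and writable only by root (mode 0600), handled by proc_dointvec. Two entries depart from it on purpose: grsec_lock is the conventional one-way lock (once set, the remaining grsec_* sysctls refuse further writes), and romount_protect uses proc_dointvec_minmax with both extra1 and extra2 pointing at the value 1, so it can only ever be switched on. A minimal out-of-tree sketch of the same ctl_table pattern (module, path, and flag names here are illustrative, not part of this patch):

/* Hypothetical module showing the sysctl pattern used by the table above. */
#include <linux/module.h>
#include <linux/sysctl.h>

static int example_enabled;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_enabled",
		.data		= &example_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0600,		/* root-only, like the grsec entries */
		.proc_handler	= &proc_dointvec,
	},
	{ }				/* empty sentinel, as at the end of the table above */
};

static struct ctl_table_header *example_header;

static int __init example_init(void)
{
	example_header = register_sysctl("kernel/example", example_table);
	return example_header ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	unregister_sysctl_table(example_header);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");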
79920diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79921new file mode 100644
79922index 0000000..61b514e
79923--- /dev/null
79924+++ b/grsecurity/grsec_time.c
79925@@ -0,0 +1,16 @@
79926+#include <linux/kernel.h>
79927+#include <linux/sched.h>
79928+#include <linux/grinternal.h>
79929+#include <linux/module.h>
79930+
79931+void
79932+gr_log_timechange(void)
79933+{
79934+#ifdef CONFIG_GRKERNSEC_TIME
79935+ if (grsec_enable_time)
79936+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79937+#endif
79938+ return;
79939+}
79940+
79941+EXPORT_SYMBOL_GPL(gr_log_timechange);
79942diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79943new file mode 100644
79944index 0000000..d1953de
79945--- /dev/null
79946+++ b/grsecurity/grsec_tpe.c
79947@@ -0,0 +1,78 @@
79948+#include <linux/kernel.h>
79949+#include <linux/sched.h>
79950+#include <linux/file.h>
79951+#include <linux/fs.h>
79952+#include <linux/grinternal.h>
79953+
79954+extern int gr_acl_tpe_check(void);
79955+
79956+int
79957+gr_tpe_allow(const struct file *file)
79958+{
79959+#ifdef CONFIG_GRKERNSEC
79960+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79961+ struct inode *file_inode = file->f_path.dentry->d_inode;
79962+ const struct cred *cred = current_cred();
79963+ char *msg = NULL;
79964+ char *msg2 = NULL;
79965+
79966+ // never restrict root
79967+ if (gr_is_global_root(cred->uid))
79968+ return 1;
79969+
79970+ if (grsec_enable_tpe) {
79971+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79972+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79973+ msg = "not being in trusted group";
79974+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79975+ msg = "being in untrusted group";
79976+#else
79977+ if (in_group_p(grsec_tpe_gid))
79978+ msg = "being in untrusted group";
79979+#endif
79980+ }
79981+ if (!msg && gr_acl_tpe_check())
79982+ msg = "being in untrusted role";
79983+
79984+ // not in any affected group/role
79985+ if (!msg)
79986+ goto next_check;
79987+
79988+ if (gr_is_global_nonroot(inode->i_uid))
79989+ msg2 = "file in non-root-owned directory";
79990+ else if (inode->i_mode & S_IWOTH)
79991+ msg2 = "file in world-writable directory";
79992+ else if (inode->i_mode & S_IWGRP)
79993+ msg2 = "file in group-writable directory";
79994+ else if (file_inode->i_mode & S_IWOTH)
79995+ msg2 = "file is world-writable";
79996+
79997+ if (msg && msg2) {
79998+ char fullmsg[70] = {0};
79999+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
80000+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
80001+ return 0;
80002+ }
80003+ msg = NULL;
80004+next_check:
80005+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80006+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
80007+ return 1;
80008+
80009+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
80010+ msg = "directory not owned by user";
80011+ else if (inode->i_mode & S_IWOTH)
80012+ msg = "file in world-writable directory";
80013+ else if (inode->i_mode & S_IWGRP)
80014+ msg = "file in group-writable directory";
80015+ else if (file_inode->i_mode & S_IWOTH)
80016+ msg = "file is world-writable";
80017+
80018+ if (msg) {
80019+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
80020+ return 0;
80021+ }
80022+#endif
80023+#endif
80024+ return 1;
80025+}
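Both passes of gr_tpe_allow() reduce to the same predicate over the parent directory's owner and mode plus the file's own mode. A userspace approximation of that predicate, purely illustrative — the kernel works on already-resolved inodes rather than a path, so it has none of the races a path-based check would:

/* Userspace sketch of the directory checks in gr_tpe_allow(): a file is
 * "untrusted" if its parent directory is not root-owned or is group/world-
 * writable, or if the file itself is world-writable. */
#include <libgen.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static const char *tpe_reason(const char *path)
{
	struct stat dir_st, file_st;
	char buf[4096];

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	if (stat(dirname(buf), &dir_st) || stat(path, &file_st))
		return "stat failed";
	if (dir_st.st_uid != 0)
		return "file in non-root-owned directory";
	if (dir_st.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir_st.st_mode & S_IWGRP)
		return "file in group-writable directory";
	if (file_st.st_mode & S_IWOTH)
		return "file is world-writable";
	return NULL;	/* would be allowed */
}

int main(int argc, char **argv)
{
	const char *why = argc > 1 ? tpe_reason(argv[1]) : "usage: tpe <path>";
	puts(why ? why : "would be allowed");
	return 0;
}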
80026diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
80027new file mode 100644
80028index 0000000..ae02d8e
80029--- /dev/null
80030+++ b/grsecurity/grsec_usb.c
80031@@ -0,0 +1,15 @@
80032+#include <linux/kernel.h>
80033+#include <linux/grinternal.h>
80034+#include <linux/module.h>
80035+
80036+int gr_handle_new_usb(void)
80037+{
80038+#ifdef CONFIG_GRKERNSEC_DENYUSB
80039+ if (grsec_deny_new_usb) {
80040+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
80041+ return 1;
80042+ }
80043+#endif
80044+ return 0;
80045+}
80046+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
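With CONFIG_GRKERNSEC_DENYUSB built in (and without the _FORCE variant, which hard-wires the denial), grsec_deny_new_usb is flipped through the deny_new_usb sysctl registered in the table earlier. A sketch of the usual deployment, run late in boot once all expected devices have enumerated; the path assumes the standard grsecurity sysctl location:

/* Sketch: one-way lockdown of new USB devices from userspace. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/grsecurity/deny_new_usb", "w");

	if (!f) {
		perror("deny_new_usb");
		return 1;
	}
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}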
80047diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
80048new file mode 100644
80049index 0000000..158b330
80050--- /dev/null
80051+++ b/grsecurity/grsum.c
80052@@ -0,0 +1,64 @@
80053+#include <linux/err.h>
80054+#include <linux/kernel.h>
80055+#include <linux/sched.h>
80056+#include <linux/mm.h>
80057+#include <linux/scatterlist.h>
80058+#include <linux/crypto.h>
80059+#include <linux/gracl.h>
80060+
80061+
80062+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
80063+#error "crypto and sha256 must be built into the kernel"
80064+#endif
80065+
80066+int
80067+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
80068+{
80069+ struct crypto_hash *tfm;
80070+ struct hash_desc desc;
80071+ struct scatterlist sg[2];
80072+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
80073+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
80074+ unsigned long *sumptr = (unsigned long *)sum;
80075+ int cryptres;
80076+ int retval = 1;
80077+ volatile int mismatched = 0;
80078+ volatile int dummy = 0;
80079+ unsigned int i;
80080+
80081+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
80082+ if (IS_ERR(tfm)) {
80083+ /* should never happen, since sha256 should be built in */
80084+ memset(entry->pw, 0, GR_PW_LEN);
80085+ return 1;
80086+ }
80087+
80088+ sg_init_table(sg, 2);
80089+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
80090+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
80091+
80092+ desc.tfm = tfm;
80093+ desc.flags = 0;
80094+
80095+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
80096+ temp_sum);
80097+
80098+ memset(entry->pw, 0, GR_PW_LEN);
80099+
80100+ if (cryptres)
80101+ goto out;
80102+
80103+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
80104+ if (sumptr[i] != tmpsumptr[i])
80105+ mismatched = 1;
80106+ else
80107+ dummy = 1; // waste a cycle
80108+
80109+ if (!mismatched)
80110+ retval = dummy - 1;
80111+
80112+out:
80113+ crypto_free_hash(tfm);
80114+
80115+ return retval;
80116+}
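Note the shape of the comparison loop in chkpw(): it walks all GR_SHA_LEN bytes instead of returning at the first mismatching word, so the elapsed time does not reveal how long the matching prefix was, and the volatile dummy assignment keeps the compiler from collapsing the balanced branch. The same technique in isolation (a sketch, not the kernel's helper; later mainline kernels provide crypto_memneq() for this purpose):

/* Constant-time digest comparison: examine every byte, accumulate
 * differences, decide only at the end — no data-dependent early exit. */
#include <stddef.h>

static int ct_memcmp(const unsigned char *a, const unsigned char *b,
		     size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];

	return diff != 0;	/* 0 if equal, 1 if any byte differed */
}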
80117diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
80118index 77ff547..181834f 100644
80119--- a/include/asm-generic/4level-fixup.h
80120+++ b/include/asm-generic/4level-fixup.h
80121@@ -13,8 +13,10 @@
80122 #define pmd_alloc(mm, pud, address) \
80123 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
80124 NULL: pmd_offset(pud, address))
80125+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
80126
80127 #define pud_alloc(mm, pgd, address) (pgd)
80128+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
80129 #define pud_offset(pgd, start) (pgd)
80130 #define pud_none(pud) 0
80131 #define pud_bad(pud) 0
80132diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
80133index b7babf0..97f4c4f 100644
80134--- a/include/asm-generic/atomic-long.h
80135+++ b/include/asm-generic/atomic-long.h
80136@@ -22,6 +22,12 @@
80137
80138 typedef atomic64_t atomic_long_t;
80139
80140+#ifdef CONFIG_PAX_REFCOUNT
80141+typedef atomic64_unchecked_t atomic_long_unchecked_t;
80142+#else
80143+typedef atomic64_t atomic_long_unchecked_t;
80144+#endif
80145+
80146 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
80147
80148 static inline long atomic_long_read(atomic_long_t *l)
80149@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80150 return (long)atomic64_read(v);
80151 }
80152
80153+#ifdef CONFIG_PAX_REFCOUNT
80154+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80155+{
80156+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80157+
80158+ return (long)atomic64_read_unchecked(v);
80159+}
80160+#endif
80161+
80162 static inline void atomic_long_set(atomic_long_t *l, long i)
80163 {
80164 atomic64_t *v = (atomic64_t *)l;
80165@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80166 atomic64_set(v, i);
80167 }
80168
80169+#ifdef CONFIG_PAX_REFCOUNT
80170+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80171+{
80172+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80173+
80174+ atomic64_set_unchecked(v, i);
80175+}
80176+#endif
80177+
80178 static inline void atomic_long_inc(atomic_long_t *l)
80179 {
80180 atomic64_t *v = (atomic64_t *)l;
80181@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80182 atomic64_inc(v);
80183 }
80184
80185+#ifdef CONFIG_PAX_REFCOUNT
80186+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80187+{
80188+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80189+
80190+ atomic64_inc_unchecked(v);
80191+}
80192+#endif
80193+
80194 static inline void atomic_long_dec(atomic_long_t *l)
80195 {
80196 atomic64_t *v = (atomic64_t *)l;
80197@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80198 atomic64_dec(v);
80199 }
80200
80201+#ifdef CONFIG_PAX_REFCOUNT
80202+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80203+{
80204+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80205+
80206+ atomic64_dec_unchecked(v);
80207+}
80208+#endif
80209+
80210 static inline void atomic_long_add(long i, atomic_long_t *l)
80211 {
80212 atomic64_t *v = (atomic64_t *)l;
80213@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80214 atomic64_add(i, v);
80215 }
80216
80217+#ifdef CONFIG_PAX_REFCOUNT
80218+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80219+{
80220+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80221+
80222+ atomic64_add_unchecked(i, v);
80223+}
80224+#endif
80225+
80226 static inline void atomic_long_sub(long i, atomic_long_t *l)
80227 {
80228 atomic64_t *v = (atomic64_t *)l;
80229@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80230 atomic64_sub(i, v);
80231 }
80232
80233+#ifdef CONFIG_PAX_REFCOUNT
80234+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80235+{
80236+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80237+
80238+ atomic64_sub_unchecked(i, v);
80239+}
80240+#endif
80241+
80242 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80243 {
80244 atomic64_t *v = (atomic64_t *)l;
80245@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80246 return atomic64_add_negative(i, v);
80247 }
80248
80249-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80250+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80251 {
80252 atomic64_t *v = (atomic64_t *)l;
80253
80254 return (long)atomic64_add_return(i, v);
80255 }
80256
80257+#ifdef CONFIG_PAX_REFCOUNT
80258+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80259+{
80260+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80261+
80262+ return (long)atomic64_add_return_unchecked(i, v);
80263+}
80264+#endif
80265+
80266 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80267 {
80268 atomic64_t *v = (atomic64_t *)l;
80269@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80270 return (long)atomic64_inc_return(v);
80271 }
80272
80273+#ifdef CONFIG_PAX_REFCOUNT
80274+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80275+{
80276+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80277+
80278+ return (long)atomic64_inc_return_unchecked(v);
80279+}
80280+#endif
80281+
80282 static inline long atomic_long_dec_return(atomic_long_t *l)
80283 {
80284 atomic64_t *v = (atomic64_t *)l;
80285@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80286
80287 typedef atomic_t atomic_long_t;
80288
80289+#ifdef CONFIG_PAX_REFCOUNT
80290+typedef atomic_unchecked_t atomic_long_unchecked_t;
80291+#else
80292+typedef atomic_t atomic_long_unchecked_t;
80293+#endif
80294+
80295 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
80296 static inline long atomic_long_read(atomic_long_t *l)
80297 {
80298@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80299 return (long)atomic_read(v);
80300 }
80301
80302+#ifdef CONFIG_PAX_REFCOUNT
80303+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80304+{
80305+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80306+
80307+ return (long)atomic_read_unchecked(v);
80308+}
80309+#endif
80310+
80311 static inline void atomic_long_set(atomic_long_t *l, long i)
80312 {
80313 atomic_t *v = (atomic_t *)l;
80314@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80315 atomic_set(v, i);
80316 }
80317
80318+#ifdef CONFIG_PAX_REFCOUNT
80319+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80320+{
80321+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80322+
80323+ atomic_set_unchecked(v, i);
80324+}
80325+#endif
80326+
80327 static inline void atomic_long_inc(atomic_long_t *l)
80328 {
80329 atomic_t *v = (atomic_t *)l;
80330@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80331 atomic_inc(v);
80332 }
80333
80334+#ifdef CONFIG_PAX_REFCOUNT
80335+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80336+{
80337+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80338+
80339+ atomic_inc_unchecked(v);
80340+}
80341+#endif
80342+
80343 static inline void atomic_long_dec(atomic_long_t *l)
80344 {
80345 atomic_t *v = (atomic_t *)l;
80346@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80347 atomic_dec(v);
80348 }
80349
80350+#ifdef CONFIG_PAX_REFCOUNT
80351+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80352+{
80353+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80354+
80355+ atomic_dec_unchecked(v);
80356+}
80357+#endif
80358+
80359 static inline void atomic_long_add(long i, atomic_long_t *l)
80360 {
80361 atomic_t *v = (atomic_t *)l;
80362@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80363 atomic_add(i, v);
80364 }
80365
80366+#ifdef CONFIG_PAX_REFCOUNT
80367+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80368+{
80369+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80370+
80371+ atomic_add_unchecked(i, v);
80372+}
80373+#endif
80374+
80375 static inline void atomic_long_sub(long i, atomic_long_t *l)
80376 {
80377 atomic_t *v = (atomic_t *)l;
80378@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80379 atomic_sub(i, v);
80380 }
80381
80382+#ifdef CONFIG_PAX_REFCOUNT
80383+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80384+{
80385+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80386+
80387+ atomic_sub_unchecked(i, v);
80388+}
80389+#endif
80390+
80391 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80392 {
80393 atomic_t *v = (atomic_t *)l;
80394@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
80395 return (long)atomic_add_return(i, v);
80396 }
80397
80398+#ifdef CONFIG_PAX_REFCOUNT
80399+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80400+{
80401+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80402+
80403+ return (long)atomic_add_return_unchecked(i, v);
80404+}
80405+
80406+#endif
80407+
80408 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80409 {
80410 atomic_t *v = (atomic_t *)l;
80411@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80412 return (long)atomic_inc_return(v);
80413 }
80414
80415+#ifdef CONFIG_PAX_REFCOUNT
80416+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80417+{
80418+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80419+
80420+ return (long)atomic_inc_return_unchecked(v);
80421+}
80422+#endif
80423+
80424 static inline long atomic_long_dec_return(atomic_long_t *l)
80425 {
80426 atomic_t *v = (atomic_t *)l;
80427@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80428
80429 #endif /* BITS_PER_LONG == 64 */
80430
80431+#ifdef CONFIG_PAX_REFCOUNT
80432+static inline void pax_refcount_needs_these_functions(void)
80433+{
80434+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
80435+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
80436+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
80437+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
80438+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
80439+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
80440+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
80441+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
80442+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
80443+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
80444+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
80445+#ifdef CONFIG_X86
80446+ atomic_clear_mask_unchecked(0, NULL);
80447+ atomic_set_mask_unchecked(0, NULL);
80448+#endif
80449+
80450+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
80451+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
80452+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
80453+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
80454+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
80455+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
80456+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
80457+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
80458+}
80459+#else
80460+#define atomic_read_unchecked(v) atomic_read(v)
80461+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
80462+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
80463+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
80464+#define atomic_inc_unchecked(v) atomic_inc(v)
80465+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
80466+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
80467+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
80468+#define atomic_dec_unchecked(v) atomic_dec(v)
80469+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
80470+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
80471+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
80472+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
80473+
80474+#define atomic_long_read_unchecked(v) atomic_long_read(v)
80475+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
80476+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
80477+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
80478+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
80479+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
80480+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
80481+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
80482+#endif
80483+
80484 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
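The point of the parallel *_unchecked API defined above: under CONFIG_PAX_REFCOUNT the ordinary atomic operations are instrumented to detect and stop reference-count overflows, while counters whose wraparound is harmless (statistics, event counts) opt out through the unchecked type and compile down to plain atomics. A sketch of the intended split — the struct and field names are illustrative:

/* With PAX_REFCOUNT, the checked type traps on overflow; the unchecked
 * type behaves like a plain atomic. Without it, both are identical. */
#include <linux/atomic.h>

struct example_stats {
	atomic_long_t		refcnt;		/* overflow would be a bug: checked */
	atomic_long_unchecked_t	rx_packets;	/* wraparound is benign: unchecked */
};

static void example_rx(struct example_stats *s)
{
	atomic_long_inc(&s->refcnt);			/* instrumented */
	atomic_long_inc_unchecked(&s->rx_packets);	/* not instrumented */
	atomic_long_dec(&s->refcnt);
}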
80485diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
80486index 9c79e76..9f7827d 100644
80487--- a/include/asm-generic/atomic.h
80488+++ b/include/asm-generic/atomic.h
80489@@ -154,7 +154,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
80490 * Atomically clears the bits set in @mask from @v
80491 */
80492 #ifndef atomic_clear_mask
80493-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
80494+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
80495 {
80496 unsigned long flags;
80497
80498diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
80499index b18ce4f..2ee2843 100644
80500--- a/include/asm-generic/atomic64.h
80501+++ b/include/asm-generic/atomic64.h
80502@@ -16,6 +16,8 @@ typedef struct {
80503 long long counter;
80504 } atomic64_t;
80505
80506+typedef atomic64_t atomic64_unchecked_t;
80507+
80508 #define ATOMIC64_INIT(i) { (i) }
80509
80510 extern long long atomic64_read(const atomic64_t *v);
80511@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
80512 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
80513 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
80514
80515+#define atomic64_read_unchecked(v) atomic64_read(v)
80516+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
80517+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
80518+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
80519+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
80520+#define atomic64_inc_unchecked(v) atomic64_inc(v)
80521+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
80522+#define atomic64_dec_unchecked(v) atomic64_dec(v)
80523+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
80524+
80525 #endif /* _ASM_GENERIC_ATOMIC64_H */
80526diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
80527index 1402fa8..025a736 100644
80528--- a/include/asm-generic/barrier.h
80529+++ b/include/asm-generic/barrier.h
80530@@ -74,7 +74,7 @@
80531 do { \
80532 compiletime_assert_atomic_type(*p); \
80533 smp_mb(); \
80534- ACCESS_ONCE(*p) = (v); \
80535+ ACCESS_ONCE_RW(*p) = (v); \
80536 } while (0)
80537
80538 #define smp_load_acquire(p) \
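smp_store_release() needs ACCESS_ONCE_RW() here because, elsewhere in this patch, ACCESS_ONCE() is routed through a const-qualified pointer so that accidental writes fail to compile; deliberate write sites must use the _RW form. The release/acquire semantics themselves are unchanged. A minimal sketch of how the pair is meant to be used (kernel context; both primitives come from asm/barrier.h):

/* The writer publishes data, then sets the flag with release semantics;
 * a reader that observes the flag with acquire semantics is guaranteed
 * to also observe the data written before it. */
static int data;
static int ready;

static void writer(void)
{
	data = 42;
	smp_store_release(&ready, 1);
}

static int reader(void)
{
	if (smp_load_acquire(&ready))
		return data;	/* guaranteed to read 42 */
	return -1;		/* not published yet */
}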
80539diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
80540index a60a7cc..0fe12f2 100644
80541--- a/include/asm-generic/bitops/__fls.h
80542+++ b/include/asm-generic/bitops/__fls.h
80543@@ -9,7 +9,7 @@
80544 *
80545 * Undefined if no set bit exists, so code should check against 0 first.
80546 */
80547-static __always_inline unsigned long __fls(unsigned long word)
80548+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
80549 {
80550 int num = BITS_PER_LONG - 1;
80551
80552diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
80553index 0576d1f..dad6c71 100644
80554--- a/include/asm-generic/bitops/fls.h
80555+++ b/include/asm-generic/bitops/fls.h
80556@@ -9,7 +9,7 @@
80557 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
80558 */
80559
80560-static __always_inline int fls(int x)
80561+static __always_inline int __intentional_overflow(-1) fls(int x)
80562 {
80563 int r = 32;
80564
80565diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
80566index b097cf8..3d40e14 100644
80567--- a/include/asm-generic/bitops/fls64.h
80568+++ b/include/asm-generic/bitops/fls64.h
80569@@ -15,7 +15,7 @@
80570 * at position 64.
80571 */
80572 #if BITS_PER_LONG == 32
80573-static __always_inline int fls64(__u64 x)
80574+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80575 {
80576 __u32 h = x >> 32;
80577 if (h)
80578@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
80579 return fls(x);
80580 }
80581 #elif BITS_PER_LONG == 64
80582-static __always_inline int fls64(__u64 x)
80583+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80584 {
80585 if (x == 0)
80586 return 0;
80587diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
80588index 1bfcfe5..e04c5c9 100644
80589--- a/include/asm-generic/cache.h
80590+++ b/include/asm-generic/cache.h
80591@@ -6,7 +6,7 @@
80592 * cache lines need to provide their own cache.h.
80593 */
80594
80595-#define L1_CACHE_SHIFT 5
80596-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
80597+#define L1_CACHE_SHIFT 5UL
80598+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
80599
80600 #endif /* __ASM_GENERIC_CACHE_H */
80601diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
80602index 0d68a1e..b74a761 100644
80603--- a/include/asm-generic/emergency-restart.h
80604+++ b/include/asm-generic/emergency-restart.h
80605@@ -1,7 +1,7 @@
80606 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
80607 #define _ASM_GENERIC_EMERGENCY_RESTART_H
80608
80609-static inline void machine_emergency_restart(void)
80610+static inline __noreturn void machine_emergency_restart(void)
80611 {
80612 machine_restart(NULL);
80613 }
80614diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
80615index 975e1cc..0b8a083 100644
80616--- a/include/asm-generic/io.h
80617+++ b/include/asm-generic/io.h
80618@@ -289,7 +289,7 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
80619 * These are pretty trivial
80620 */
80621 #ifndef virt_to_phys
80622-static inline unsigned long virt_to_phys(volatile void *address)
80623+static inline unsigned long __intentional_overflow(-1) virt_to_phys(volatile void *address)
80624 {
80625 return __pa((unsigned long)address);
80626 }
80627diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
80628index 90f99c7..00ce236 100644
80629--- a/include/asm-generic/kmap_types.h
80630+++ b/include/asm-generic/kmap_types.h
80631@@ -2,9 +2,9 @@
80632 #define _ASM_GENERIC_KMAP_TYPES_H
80633
80634 #ifdef __WITH_KM_FENCE
80635-# define KM_TYPE_NR 41
80636+# define KM_TYPE_NR 42
80637 #else
80638-# define KM_TYPE_NR 20
80639+# define KM_TYPE_NR 21
80640 #endif
80641
80642 #endif
80643diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
80644index 9ceb03b..62b0b8f 100644
80645--- a/include/asm-generic/local.h
80646+++ b/include/asm-generic/local.h
80647@@ -23,24 +23,37 @@ typedef struct
80648 atomic_long_t a;
80649 } local_t;
80650
80651+typedef struct {
80652+ atomic_long_unchecked_t a;
80653+} local_unchecked_t;
80654+
80655 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
80656
80657 #define local_read(l) atomic_long_read(&(l)->a)
80658+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
80659 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
80660+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
80661 #define local_inc(l) atomic_long_inc(&(l)->a)
80662+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
80663 #define local_dec(l) atomic_long_dec(&(l)->a)
80664+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
80665 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
80666+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
80667 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
80668+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
80669
80670 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
80671 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
80672 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
80673 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
80674 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
80675+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
80676 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
80677 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
80678+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
80679
80680 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80681+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80682 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
80683 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
80684 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
80685diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
80686index 725612b..9cc513a 100644
80687--- a/include/asm-generic/pgtable-nopmd.h
80688+++ b/include/asm-generic/pgtable-nopmd.h
80689@@ -1,14 +1,19 @@
80690 #ifndef _PGTABLE_NOPMD_H
80691 #define _PGTABLE_NOPMD_H
80692
80693-#ifndef __ASSEMBLY__
80694-
80695 #include <asm-generic/pgtable-nopud.h>
80696
80697-struct mm_struct;
80698-
80699 #define __PAGETABLE_PMD_FOLDED
80700
80701+#define PMD_SHIFT PUD_SHIFT
80702+#define PTRS_PER_PMD 1
80703+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
80704+#define PMD_MASK (~(PMD_SIZE-1))
80705+
80706+#ifndef __ASSEMBLY__
80707+
80708+struct mm_struct;
80709+
80710 /*
80711 * Having the pmd type consist of a pud gets the size right, and allows
80712 * us to conceptually access the pud entry that this pmd is folded into
80713@@ -16,11 +21,6 @@ struct mm_struct;
80714 */
80715 typedef struct { pud_t pud; } pmd_t;
80716
80717-#define PMD_SHIFT PUD_SHIFT
80718-#define PTRS_PER_PMD 1
80719-#define PMD_SIZE (1UL << PMD_SHIFT)
80720-#define PMD_MASK (~(PMD_SIZE-1))
80721-
80722 /*
80723 * The "pud_xxx()" functions here are trivial for a folded two-level
80724 * setup: the pmd is never bad, and a pmd always exists (as it's folded
80725diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
80726index 810431d..0ec4804f 100644
80727--- a/include/asm-generic/pgtable-nopud.h
80728+++ b/include/asm-generic/pgtable-nopud.h
80729@@ -1,10 +1,15 @@
80730 #ifndef _PGTABLE_NOPUD_H
80731 #define _PGTABLE_NOPUD_H
80732
80733-#ifndef __ASSEMBLY__
80734-
80735 #define __PAGETABLE_PUD_FOLDED
80736
80737+#define PUD_SHIFT PGDIR_SHIFT
80738+#define PTRS_PER_PUD 1
80739+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
80740+#define PUD_MASK (~(PUD_SIZE-1))
80741+
80742+#ifndef __ASSEMBLY__
80743+
80744 /*
80745 * Having the pud type consist of a pgd gets the size right, and allows
80746 * us to conceptually access the pgd entry that this pud is folded into
80747@@ -12,11 +17,6 @@
80748 */
80749 typedef struct { pgd_t pgd; } pud_t;
80750
80751-#define PUD_SHIFT PGDIR_SHIFT
80752-#define PTRS_PER_PUD 1
80753-#define PUD_SIZE (1UL << PUD_SHIFT)
80754-#define PUD_MASK (~(PUD_SIZE-1))
80755-
80756 /*
80757 * The "pgd_xxx()" functions here are trivial for a folded two-level
80758 * setup: the pud is never bad, and a pud always exists (as it's folded
80759@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
80760 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
80761
80762 #define pgd_populate(mm, pgd, pud) do { } while (0)
80763+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
80764 /*
80765 * (puds are folded into pgds so this doesn't get actually called,
80766 * but the define is needed for a generic inline function.)
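In both pgtable-nopmd.h and pgtable-nopud.h the fold macros (PMD_SHIFT, PMD_SIZE, PUD_SHIFT, PUD_SIZE, and the masks) move in front of the #ifndef __ASSEMBLY__ guard so assembly files can use them, and that relocation is why 1UL becomes _AC(1,UL): the UL suffix is C-only syntax. _AC() comes from include/uapi/linux/const.h and, paraphrased, works like this:

/* In assembly the suffix is dropped; in C it is token-pasted on. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

/* So PMD_SIZE expands to (1 << PMD_SHIFT) for the assembler and to
 * (1UL << PMD_SHIFT) for C. */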
80767diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
80768index 53b2acc..f4568e7 100644
80769--- a/include/asm-generic/pgtable.h
80770+++ b/include/asm-generic/pgtable.h
80771@@ -819,6 +819,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
80772 }
80773 #endif /* CONFIG_NUMA_BALANCING */
80774
80775+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
80776+#ifdef CONFIG_PAX_KERNEXEC
80777+#error KERNEXEC requires pax_open_kernel
80778+#else
80779+static inline unsigned long pax_open_kernel(void) { return 0; }
80780+#endif
80781+#endif
80782+
80783+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
80784+#ifdef CONFIG_PAX_KERNEXEC
80785+#error KERNEXEC requires pax_close_kernel
80786+#else
80787+static inline unsigned long pax_close_kernel(void) { return 0; }
80788+#endif
80789+#endif
80790+
80791 #endif /* CONFIG_MMU */
80792
80793 #endif /* !__ASSEMBLY__ */
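Architectures that implement KERNEXEC supply real pax_open_kernel()/pax_close_kernel(); the #error/stub pairs above turn a missing implementation into a build failure when the feature is on and a no-op when it is off. The conventional call pattern, as a sketch — ro_table is a hypothetical object living in read-only data:

/* Briefly lift kernel write protection, patch read-only data, restore. */
static unsigned long ro_table[16] __read_only;

static void update_ro_entry(int idx, unsigned long val)
{
	pax_open_kernel();
	ro_table[idx] = val;
	pax_close_kernel();
}

The pax_open_userland()/pax_close_userland() stubs added to asm-generic/uaccess.h below follow the same convention for UDEREF.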
80794diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
80795index 72d8803..cb9749c 100644
80796--- a/include/asm-generic/uaccess.h
80797+++ b/include/asm-generic/uaccess.h
80798@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
80799 return __clear_user(to, n);
80800 }
80801
80802+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
80803+#ifdef CONFIG_PAX_MEMORY_UDEREF
80804+#error UDEREF requires pax_open_userland
80805+#else
80806+static inline unsigned long pax_open_userland(void) { return 0; }
80807+#endif
80808+#endif
80809+
80810+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
80811+#ifdef CONFIG_PAX_MEMORY_UDEREF
80812+#error UDEREF requires pax_close_userland
80813+#else
80814+static inline unsigned long pax_close_userland(void) { return 0; }
80815+#endif
80816+#endif
80817+
80818 #endif /* __ASM_GENERIC_UACCESS_H */
80819diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
80820index c1c0b0c..05c9588 100644
80821--- a/include/asm-generic/vmlinux.lds.h
80822+++ b/include/asm-generic/vmlinux.lds.h
80823@@ -231,6 +231,7 @@
80824 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
80825 VMLINUX_SYMBOL(__start_rodata) = .; \
80826 *(.rodata) *(.rodata.*) \
80827+ *(.data..read_only) \
80828 *(__vermagic) /* Kernel version magic */ \
80829 . = ALIGN(8); \
80830 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
80831@@ -719,17 +720,18 @@
80832 * section in the linker script will go there too. @phdr should have
80833 * a leading colon.
80834 *
80835- * Note that this macros defines __per_cpu_load as an absolute symbol.
80836+ * Note that this macro defines per_cpu_load as an absolute symbol.
80837 * If there is no need to put the percpu section at a predetermined
80838 * address, use PERCPU_SECTION.
80839 */
80840 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
80841- VMLINUX_SYMBOL(__per_cpu_load) = .; \
80842- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
80843+ per_cpu_load = .; \
80844+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
80845 - LOAD_OFFSET) { \
80846+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
80847 PERCPU_INPUT(cacheline) \
80848 } phdr \
80849- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
80850+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
80851
80852 /**
80853 * PERCPU_SECTION - define output section for percpu area, simple version
80854diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80855index 016c2f1..c4baa98 100644
80856--- a/include/crypto/algapi.h
80857+++ b/include/crypto/algapi.h
80858@@ -34,7 +34,7 @@ struct crypto_type {
80859 unsigned int maskclear;
80860 unsigned int maskset;
80861 unsigned int tfmsize;
80862-};
80863+} __do_const;
80864
80865 struct crypto_instance {
80866 struct crypto_alg alg;
80867diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80868index 8af71a8..7fe6c19 100644
80869--- a/include/drm/drmP.h
80870+++ b/include/drm/drmP.h
80871@@ -68,6 +68,7 @@
80872 #include <linux/workqueue.h>
80873 #include <linux/poll.h>
80874 #include <asm/pgalloc.h>
80875+#include <asm/local.h>
80876 #include <drm/drm.h>
80877 #include <drm/drm_sarea.h>
80878 #include <drm/drm_vma_manager.h>
80879@@ -261,10 +262,12 @@ do { \
80880 * \param cmd command.
80881 * \param arg argument.
80882 */
80883-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80884+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80885+ struct drm_file *file_priv);
80886+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80887 struct drm_file *file_priv);
80888
80889-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80890+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80891 unsigned long arg);
80892
80893 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80894@@ -280,10 +283,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80895 struct drm_ioctl_desc {
80896 unsigned int cmd;
80897 int flags;
80898- drm_ioctl_t *func;
80899+ drm_ioctl_t func;
80900 unsigned int cmd_drv;
80901 const char *name;
80902-};
80903+} __do_const;
80904
80905 /**
80906 * Creates a driver or general drm_ioctl_desc array entry for the given
80907@@ -983,7 +986,8 @@ struct drm_info_list {
80908 int (*show)(struct seq_file*, void*); /** show callback */
80909 u32 driver_features; /**< Required driver features for this entry */
80910 void *data;
80911-};
80912+} __do_const;
80913+typedef struct drm_info_list __no_const drm_info_list_no_const;
80914
80915 /**
80916 * debugfs node structure. This structure represents a debugfs file.
80917@@ -1067,7 +1071,7 @@ struct drm_device {
80918
80919 /** \name Usage Counters */
80920 /*@{ */
80921- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80922+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80923 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80924 int buf_use; /**< Buffers in use -- cannot alloc */
80925 atomic_t buf_alloc; /**< Buffer allocation in progress */
80926diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80927index a3d75fe..6802f9c 100644
80928--- a/include/drm/drm_crtc_helper.h
80929+++ b/include/drm/drm_crtc_helper.h
80930@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
80931 struct drm_connector *connector);
80932 /* disable encoder when not in use - more explicit than dpms off */
80933 void (*disable)(struct drm_encoder *encoder);
80934-};
80935+} __no_const;
80936
80937 /**
80938 * drm_connector_helper_funcs - helper operations for connectors
80939diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80940index a70d456..6ea07cd 100644
80941--- a/include/drm/i915_pciids.h
80942+++ b/include/drm/i915_pciids.h
80943@@ -37,7 +37,7 @@
80944 */
80945 #define INTEL_VGA_DEVICE(id, info) { \
80946 0x8086, id, \
80947- ~0, ~0, \
80948+ PCI_ANY_ID, PCI_ANY_ID, \
80949 0x030000, 0xff0000, \
80950 (unsigned long) info }
80951
80952diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80953index 72dcbe8..8db58d7 100644
80954--- a/include/drm/ttm/ttm_memory.h
80955+++ b/include/drm/ttm/ttm_memory.h
80956@@ -48,7 +48,7 @@
80957
80958 struct ttm_mem_shrink {
80959 int (*do_shrink) (struct ttm_mem_shrink *);
80960-};
80961+} __no_const;
80962
80963 /**
80964 * struct ttm_mem_global - Global memory accounting structure.
80965diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80966index 49a8284..9643967 100644
80967--- a/include/drm/ttm/ttm_page_alloc.h
80968+++ b/include/drm/ttm/ttm_page_alloc.h
80969@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80970 */
80971 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80972
80973+struct device;
80974 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80975 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80976
80977diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80978index 4b840e8..155d235 100644
80979--- a/include/keys/asymmetric-subtype.h
80980+++ b/include/keys/asymmetric-subtype.h
80981@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80982 /* Verify the signature on a key of this subtype (optional) */
80983 int (*verify_signature)(const struct key *key,
80984 const struct public_key_signature *sig);
80985-};
80986+} __do_const;
80987
80988 /**
80989 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80990diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80991index c1da539..1dcec55 100644
80992--- a/include/linux/atmdev.h
80993+++ b/include/linux/atmdev.h
80994@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80995 #endif
80996
80997 struct k_atm_aal_stats {
80998-#define __HANDLE_ITEM(i) atomic_t i
80999+#define __HANDLE_ITEM(i) atomic_unchecked_t i
81000 __AAL_STAT_ITEMS
81001 #undef __HANDLE_ITEM
81002 };
81003@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
81004 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
81005 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
81006 struct module *owner;
81007-};
81008+} __do_const;
81009
81010 struct atmphy_ops {
81011 int (*start)(struct atm_dev *dev);
81012diff --git a/include/linux/audit.h b/include/linux/audit.h
81013index 22cfddb..ab759e8 100644
81014--- a/include/linux/audit.h
81015+++ b/include/linux/audit.h
81016@@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
81017 extern unsigned int audit_serial(void);
81018 extern int auditsc_get_stamp(struct audit_context *ctx,
81019 struct timespec *t, unsigned int *serial);
81020-extern int audit_set_loginuid(kuid_t loginuid);
81021+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
81022
81023 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
81024 {
81025diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
81026index 61f29e5..e67c658 100644
81027--- a/include/linux/binfmts.h
81028+++ b/include/linux/binfmts.h
81029@@ -44,7 +44,7 @@ struct linux_binprm {
81030 unsigned interp_flags;
81031 unsigned interp_data;
81032 unsigned long loader, exec;
81033-};
81034+} __randomize_layout;
81035
81036 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
81037 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
81038@@ -73,8 +73,10 @@ struct linux_binfmt {
81039 int (*load_binary)(struct linux_binprm *);
81040 int (*load_shlib)(struct file *);
81041 int (*core_dump)(struct coredump_params *cprm);
81042+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
81043+ void (*handle_mmap)(struct file *);
81044 unsigned long min_coredump; /* minimal dump size */
81045-};
81046+} __do_const __randomize_layout;
81047
81048 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
81049
81050diff --git a/include/linux/bitops.h b/include/linux/bitops.h
81051index cbc5833..8123ebc 100644
81052--- a/include/linux/bitops.h
81053+++ b/include/linux/bitops.h
81054@@ -122,7 +122,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
81055 * @word: value to rotate
81056 * @shift: bits to roll
81057 */
81058-static inline __u32 rol32(__u32 word, unsigned int shift)
81059+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
81060 {
81061 return (word << shift) | (word >> (32 - shift));
81062 }
81063@@ -132,7 +132,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
81064 * @word: value to rotate
81065 * @shift: bits to roll
81066 */
81067-static inline __u32 ror32(__u32 word, unsigned int shift)
81068+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
81069 {
81070 return (word >> shift) | (word << (32 - shift));
81071 }
81072@@ -188,7 +188,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
81073 return (__s32)(value << shift) >> shift;
81074 }
81075
81076-static inline unsigned fls_long(unsigned long l)
81077+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
81078 {
81079 if (sizeof(l) == 4)
81080 return fls(l);
81081diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
81082index 8699bcf..279485d 100644
81083--- a/include/linux/blkdev.h
81084+++ b/include/linux/blkdev.h
81085@@ -1625,7 +1625,7 @@ struct block_device_operations {
81086 /* this callback is with swap_lock and sometimes page table lock held */
81087 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
81088 struct module *owner;
81089-};
81090+} __do_const;
81091
81092 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
81093 unsigned long);
81094diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
81095index afc1343..9735539 100644
81096--- a/include/linux/blktrace_api.h
81097+++ b/include/linux/blktrace_api.h
81098@@ -25,7 +25,7 @@ struct blk_trace {
81099 struct dentry *dropped_file;
81100 struct dentry *msg_file;
81101 struct list_head running_list;
81102- atomic_t dropped;
81103+ atomic_unchecked_t dropped;
81104 };
81105
81106 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
81107diff --git a/include/linux/cache.h b/include/linux/cache.h
81108index 17e7e82..1d7da26 100644
81109--- a/include/linux/cache.h
81110+++ b/include/linux/cache.h
81111@@ -16,6 +16,14 @@
81112 #define __read_mostly
81113 #endif
81114
81115+#ifndef __read_only
81116+#ifdef CONFIG_PAX_KERNEXEC
81117+#error KERNEXEC requires __read_only
81118+#else
81119+#define __read_only __read_mostly
81120+#endif
81121+#endif
81122+
81123 #ifndef ____cacheline_aligned
81124 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
81125 #endif
81126diff --git a/include/linux/capability.h b/include/linux/capability.h
81127index 84b13ad..172cdee 100644
81128--- a/include/linux/capability.h
81129+++ b/include/linux/capability.h
81130@@ -211,9 +211,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
81131 extern bool capable(int cap);
81132 extern bool ns_capable(struct user_namespace *ns, int cap);
81133 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
81134+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
81135 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
81136+extern bool capable_nolog(int cap);
81137+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
81138
81139 /* audit system wants to get cap info from files as well */
81140 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
81141
81142+extern int is_privileged_binary(const struct dentry *dentry);
81143+
81144 #endif /* !_LINUX_CAPABILITY_H */
81145diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
81146index 8609d57..86e4d79 100644
81147--- a/include/linux/cdrom.h
81148+++ b/include/linux/cdrom.h
81149@@ -87,7 +87,6 @@ struct cdrom_device_ops {
81150
81151 /* driver specifications */
81152 const int capability; /* capability flags */
81153- int n_minors; /* number of active minor devices */
81154 /* handle uniform packets for scsi type devices (scsi,atapi) */
81155 int (*generic_packet) (struct cdrom_device_info *,
81156 struct packet_command *);
81157diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
81158index 4ce9056..86caac6 100644
81159--- a/include/linux/cleancache.h
81160+++ b/include/linux/cleancache.h
81161@@ -31,7 +31,7 @@ struct cleancache_ops {
81162 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
81163 void (*invalidate_inode)(int, struct cleancache_filekey);
81164 void (*invalidate_fs)(int);
81165-};
81166+} __no_const;
81167
81168 extern struct cleancache_ops *
81169 cleancache_register_ops(struct cleancache_ops *ops);
81170diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
81171index 0c287db..5efa775 100644
81172--- a/include/linux/clk-provider.h
81173+++ b/include/linux/clk-provider.h
81174@@ -180,6 +180,7 @@ struct clk_ops {
81175 void (*init)(struct clk_hw *hw);
81176 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
81177 };
81178+typedef struct clk_ops __no_const clk_ops_no_const;
81179
81180 /**
81181 * struct clk_init_data - holds init data that's common to all clocks and is
81182diff --git a/include/linux/compat.h b/include/linux/compat.h
81183index e649426..a74047b 100644
81184--- a/include/linux/compat.h
81185+++ b/include/linux/compat.h
81186@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
81187 compat_size_t __user *len_ptr);
81188
81189 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
81190-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
81191+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
81192 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
81193 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
81194 compat_ssize_t msgsz, int msgflg);
81195@@ -436,7 +436,7 @@ extern int compat_ptrace_request(struct task_struct *child,
81196 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
81197 compat_ulong_t addr, compat_ulong_t data);
81198 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81199- compat_long_t addr, compat_long_t data);
81200+ compat_ulong_t addr, compat_ulong_t data);
81201
81202 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
81203 /*
81204diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
81205index 2507fd2..55203f8 100644
81206--- a/include/linux/compiler-gcc4.h
81207+++ b/include/linux/compiler-gcc4.h
81208@@ -39,9 +39,34 @@
81209 # define __compiletime_warning(message) __attribute__((warning(message)))
81210 # define __compiletime_error(message) __attribute__((error(message)))
81211 #endif /* __CHECKER__ */
81212+
81213+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
81214+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81215+#define __bos0(ptr) __bos((ptr), 0)
81216+#define __bos1(ptr) __bos((ptr), 1)
81217 #endif /* GCC_VERSION >= 40300 */
81218
81219 #if GCC_VERSION >= 40500
81220+
81221+#ifdef RANDSTRUCT_PLUGIN
81222+#define __randomize_layout __attribute__((randomize_layout))
81223+#define __no_randomize_layout __attribute__((no_randomize_layout))
81224+#endif
81225+
81226+#ifdef CONSTIFY_PLUGIN
81227+#define __no_const __attribute__((no_const))
81228+#define __do_const __attribute__((do_const))
81229+#endif
81230+
81231+#ifdef SIZE_OVERFLOW_PLUGIN
81232+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81233+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81234+#endif
81235+
81236+#ifdef LATENT_ENTROPY_PLUGIN
81237+#define __latent_entropy __attribute__((latent_entropy))
81238+#endif
81239+
81240 /*
81241 * Mark a position in code as unreachable. This can be used to
81242 * suppress control flow warnings after asm blocks that transfer
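The __alloc_size and __bos helpers defined above wrap the GCC machinery that the size checks build on: alloc_size declares which argument carries the allocation length, and __builtin_object_size queries what the compiler knows about a pointer's target. A sketch — my_alloc is hypothetical:

/* __alloc_size(1) tells GCC the first argument is the byte length of
 * the returned object; __bos0() then lets fortified helpers query it. */
#include <linux/types.h>

void *my_alloc(size_t n) __alloc_size(1);

static size_t object_bytes(void)
{
	void *p = my_alloc(64);

	/* __builtin_object_size(p, 0): maximum bytes addressable from p,
	 * or (size_t)-1 when the compiler cannot tell. Here, with the
	 * alloc_size hint visible, it can fold to 64. */
	return __bos0(p);
}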
81243diff --git a/include/linux/compiler.h b/include/linux/compiler.h
81244index d5ad7b1..3b74638 100644
81245--- a/include/linux/compiler.h
81246+++ b/include/linux/compiler.h
81247@@ -5,11 +5,14 @@
81248
81249 #ifdef __CHECKER__
81250 # define __user __attribute__((noderef, address_space(1)))
81251+# define __force_user __force __user
81252 # define __kernel __attribute__((address_space(0)))
81253+# define __force_kernel __force __kernel
81254 # define __safe __attribute__((safe))
81255 # define __force __attribute__((force))
81256 # define __nocast __attribute__((nocast))
81257 # define __iomem __attribute__((noderef, address_space(2)))
81258+# define __force_iomem __force __iomem
81259 # define __must_hold(x) __attribute__((context(x,1,1)))
81260 # define __acquires(x) __attribute__((context(x,0,1)))
81261 # define __releases(x) __attribute__((context(x,1,0)))
81262@@ -17,20 +20,37 @@
81263 # define __release(x) __context__(x,-1)
81264 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
81265 # define __percpu __attribute__((noderef, address_space(3)))
81266+# define __force_percpu __force __percpu
81267 #ifdef CONFIG_SPARSE_RCU_POINTER
81268 # define __rcu __attribute__((noderef, address_space(4)))
81269+# define __force_rcu __force __rcu
81270 #else
81271 # define __rcu
81272+# define __force_rcu
81273 #endif
81274 extern void __chk_user_ptr(const volatile void __user *);
81275 extern void __chk_io_ptr(const volatile void __iomem *);
81276 #else
81277-# define __user
81278-# define __kernel
81279+# ifdef CHECKER_PLUGIN
81280+//# define __user
81281+//# define __force_user
81282+//# define __kernel
81283+//# define __force_kernel
81284+# else
81285+# ifdef STRUCTLEAK_PLUGIN
81286+# define __user __attribute__((user))
81287+# else
81288+# define __user
81289+# endif
81290+# define __force_user
81291+# define __kernel
81292+# define __force_kernel
81293+# endif
81294 # define __safe
81295 # define __force
81296 # define __nocast
81297 # define __iomem
81298+# define __force_iomem
81299 # define __chk_user_ptr(x) (void)0
81300 # define __chk_io_ptr(x) (void)0
81301 # define __builtin_warning(x, y...) (1)
81302@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
81303 # define __release(x) (void)0
81304 # define __cond_lock(x,c) (c)
81305 # define __percpu
81306+# define __force_percpu
81307 # define __rcu
81308+# define __force_rcu
81309 #endif
81310
81311 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
81312@@ -286,6 +308,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81313 # define __attribute_const__ /* unimplemented */
81314 #endif
81315
81316+#ifndef __randomize_layout
81317+# define __randomize_layout
81318+#endif
81319+
81320+#ifndef __no_randomize_layout
81321+# define __no_randomize_layout
81322+#endif
81323+
81324+#ifndef __no_const
81325+# define __no_const
81326+#endif
81327+
81328+#ifndef __do_const
81329+# define __do_const
81330+#endif
81331+
81332+#ifndef __size_overflow
81333+# define __size_overflow(...)
81334+#endif
81335+
81336+#ifndef __intentional_overflow
81337+# define __intentional_overflow(...)
81338+#endif
81339+
81340+#ifndef __latent_entropy
81341+# define __latent_entropy
81342+#endif
81343+
81344 /*
81345 * Tell gcc if a function is cold. The compiler will assume any path
81346 * directly leading to the call is unlikely.
81347@@ -295,6 +345,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81348 #define __cold
81349 #endif
81350
81351+#ifndef __alloc_size
81352+#define __alloc_size(...)
81353+#endif
81354+
81355+#ifndef __bos
81356+#define __bos(ptr, arg)
81357+#endif
81358+
81359+#ifndef __bos0
81360+#define __bos0(ptr)
81361+#endif
81362+
81363+#ifndef __bos1
81364+#define __bos1(ptr)
81365+#endif
81366+
81367 /* Simple shorthand for a section definition */
81368 #ifndef __section
81369 # define __section(S) __attribute__ ((__section__(#S)))
81370@@ -378,7 +444,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81371 * use is to mediate communication between process-level code and irq/NMI
81372 * handlers, all running on the same CPU.
81373 */
81374-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
81375+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
81376+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
81377
81378 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
81379 #ifdef CONFIG_KPROBES
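With the change above, plain ACCESS_ONCE() casts through a volatile *const* pointer, so it can only read; stores to shared state must be spelled ACCESS_ONCE_RW(), which makes every write grep-able and keeps constified data from being scribbled on by accident. A minimal sketch of the split (shared_flag is hypothetical):

static int shared_flag;

static int reader(void)
{
	return ACCESS_ONCE(shared_flag);	/* reads still work */
}

static void writer(int v)
{
	/* ACCESS_ONCE(shared_flag) = v;  -- now a compile error:
	 * assignment through a pointer-to-const */
	ACCESS_ONCE_RW(shared_flag) = v;	/* explicit write */
}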
81380diff --git a/include/linux/completion.h b/include/linux/completion.h
81381index 5d5aaae..0ea9b84 100644
81382--- a/include/linux/completion.h
81383+++ b/include/linux/completion.h
81384@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
81385
81386 extern void wait_for_completion(struct completion *);
81387 extern void wait_for_completion_io(struct completion *);
81388-extern int wait_for_completion_interruptible(struct completion *x);
81389-extern int wait_for_completion_killable(struct completion *x);
81390+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
81391+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
81392 extern unsigned long wait_for_completion_timeout(struct completion *x,
81393- unsigned long timeout);
81394+ unsigned long timeout) __intentional_overflow(-1);
81395 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
81396- unsigned long timeout);
81397+ unsigned long timeout) __intentional_overflow(-1);
81398 extern long wait_for_completion_interruptible_timeout(
81399- struct completion *x, unsigned long timeout);
81400+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81401 extern long wait_for_completion_killable_timeout(
81402- struct completion *x, unsigned long timeout);
81403+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81404 extern bool try_wait_for_completion(struct completion *x);
81405 extern bool completion_done(struct completion *x);
81406
81407diff --git a/include/linux/configfs.h b/include/linux/configfs.h
81408index 34025df..d94bbbc 100644
81409--- a/include/linux/configfs.h
81410+++ b/include/linux/configfs.h
81411@@ -125,7 +125,7 @@ struct configfs_attribute {
81412 const char *ca_name;
81413 struct module *ca_owner;
81414 umode_t ca_mode;
81415-};
81416+} __do_const;
81417
81418 /*
81419 * Users often need to create attribute structures for their configurable
81420diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
81421index 8f8ae95..b9b0e6d 100644
81422--- a/include/linux/cpufreq.h
81423+++ b/include/linux/cpufreq.h
81424@@ -202,6 +202,7 @@ struct global_attr {
81425 ssize_t (*store)(struct kobject *a, struct attribute *b,
81426 const char *c, size_t count);
81427 };
81428+typedef struct global_attr __no_const global_attr_no_const;
81429
81430 #define define_one_global_ro(_name) \
81431 static struct global_attr _name = \
81432@@ -268,7 +269,7 @@ struct cpufreq_driver {
81433 bool boost_supported;
81434 bool boost_enabled;
81435 int (*set_boost) (int state);
81436-};
81437+} __do_const;
81438
81439 /* flags */
81440 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
81441diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
81442index 25e0df6..952dffd 100644
81443--- a/include/linux/cpuidle.h
81444+++ b/include/linux/cpuidle.h
81445@@ -50,7 +50,8 @@ struct cpuidle_state {
81446 int index);
81447
81448 int (*enter_dead) (struct cpuidle_device *dev, int index);
81449-};
81450+} __do_const;
81451+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
81452
81453 /* Idle State Flags */
81454 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
81455@@ -209,7 +210,7 @@ struct cpuidle_governor {
81456 void (*reflect) (struct cpuidle_device *dev, int index);
81457
81458 struct module *owner;
81459-};
81460+} __do_const;
81461
81462 #ifdef CONFIG_CPU_IDLE
81463 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
81464diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
81465index 2997af6..424ddc1 100644
81466--- a/include/linux/cpumask.h
81467+++ b/include/linux/cpumask.h
81468@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81469 }
81470
81471 /* Valid inputs for n are -1 and 0. */
81472-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81473+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81474 {
81475 return n+1;
81476 }
81477
81478-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81479+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81480 {
81481 return n+1;
81482 }
81483
81484-static inline unsigned int cpumask_next_and(int n,
81485+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
81486 const struct cpumask *srcp,
81487 const struct cpumask *andp)
81488 {
81489@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81490 *
81491 * Returns >= nr_cpu_ids if no further cpus set.
81492 */
81493-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81494+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81495 {
81496 /* -1 is a legal arg here. */
81497 if (n != -1)
81498@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81499 *
81500 * Returns >= nr_cpu_ids if no further cpus unset.
81501 */
81502-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81503+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81504 {
81505 /* -1 is a legal arg here. */
81506 if (n != -1)
81507@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81508 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
81509 }
81510
81511-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
81512+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
81513 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
81514 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
81515
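cpumask_next() and friends accept n == -1 to mean "start from the beginning" and compute n + 1 internally, so the __intentional_overflow(-1) annotations (-1 selecting the return value) exempt that boundary arithmetic from the size_overflow plugin. The canonical caller pattern, essentially what for_each_cpu() expands to; do_something() is hypothetical:

int cpu;

for (cpu = cpumask_next(-1, mask);	/* -1 + 1 == 0: intentional */
     cpu < nr_cpu_ids;
     cpu = cpumask_next(cpu, mask))
	do_something(cpu);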
81516diff --git a/include/linux/cred.h b/include/linux/cred.h
81517index f61d6c8..d372d95 100644
81518--- a/include/linux/cred.h
81519+++ b/include/linux/cred.h
81520@@ -35,7 +35,7 @@ struct group_info {
81521 int nblocks;
81522 kgid_t small_block[NGROUPS_SMALL];
81523 kgid_t *blocks[0];
81524-};
81525+} __randomize_layout;
81526
81527 /**
81528 * get_group_info - Get a reference to a group info structure
81529@@ -136,7 +136,7 @@ struct cred {
81530 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
81531 struct group_info *group_info; /* supplementary groups for euid/fsgid */
81532 struct rcu_head rcu; /* RCU deletion hook */
81533-};
81534+} __randomize_layout;
81535
81536 extern void __put_cred(struct cred *);
81537 extern void exit_creds(struct task_struct *);
81538@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
81539 static inline void validate_process_creds(void)
81540 {
81541 }
81542+static inline void validate_task_creds(struct task_struct *task)
81543+{
81544+}
81545 #endif
81546
81547 /**
81548@@ -322,6 +325,7 @@ static inline void put_cred(const struct cred *_cred)
81549
81550 #define task_uid(task) (task_cred_xxx((task), uid))
81551 #define task_euid(task) (task_cred_xxx((task), euid))
81552+#define task_securebits(task) (task_cred_xxx((task), securebits))
81553
81554 #define current_cred_xxx(xxx) \
81555 ({ \
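The __randomize_layout markers on group_info and cred opt these structures into the randstruct plugin, which permutes member order from a per-build random seed so exploits cannot rely on hard-coded field offsets; without the plugin the attribute (given an empty fallback in compiler.h below) compiles away. A sketch of the opt-in and its opt-out counterpart; both structs are hypothetical:

struct secrets {
	unsigned long token;
	void *owner;
} __randomize_layout;		/* member order varies per build */

struct wire_header {
	__u8  type;
	__u32 len;
} __no_randomize_layout;	/* on-wire ABI: never shuffled */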
81556diff --git a/include/linux/crypto.h b/include/linux/crypto.h
81557index b92eadf..b4ecdc1 100644
81558--- a/include/linux/crypto.h
81559+++ b/include/linux/crypto.h
81560@@ -373,7 +373,7 @@ struct cipher_tfm {
81561 const u8 *key, unsigned int keylen);
81562 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81563 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81564-};
81565+} __no_const;
81566
81567 struct hash_tfm {
81568 int (*init)(struct hash_desc *desc);
81569@@ -394,13 +394,13 @@ struct compress_tfm {
81570 int (*cot_decompress)(struct crypto_tfm *tfm,
81571 const u8 *src, unsigned int slen,
81572 u8 *dst, unsigned int *dlen);
81573-};
81574+} __no_const;
81575
81576 struct rng_tfm {
81577 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
81578 unsigned int dlen);
81579 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
81580-};
81581+} __no_const;
81582
81583 #define crt_ablkcipher crt_u.ablkcipher
81584 #define crt_aead crt_u.aead
81585diff --git a/include/linux/ctype.h b/include/linux/ctype.h
81586index 653589e..4ef254a 100644
81587--- a/include/linux/ctype.h
81588+++ b/include/linux/ctype.h
81589@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
81590 * Fast implementation of tolower() for internal usage. Do not use in your
81591 * code.
81592 */
81593-static inline char _tolower(const char c)
81594+static inline unsigned char _tolower(const unsigned char c)
81595 {
81596 return c | 0x20;
81597 }
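The signedness fix above matters for bytes outside ASCII: with a plain (possibly signed) char, a high-bit input is negative, and integer promotion sign-extends it before the OR. A short illustration of the difference:

char c = (char)0xE9;			/* negative where char is signed */
int bad  = c | 0x20;			/* promotes to 0xFFFFFFE9 first */
int good = (unsigned char)c | 0x20;	/* 0xE9, as intended */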
81598diff --git a/include/linux/dcache.h b/include/linux/dcache.h
81599index 3c7ec32..4ca97cc 100644
81600--- a/include/linux/dcache.h
81601+++ b/include/linux/dcache.h
81602@@ -133,7 +133,7 @@ struct dentry {
81603 } d_u;
81604 struct list_head d_subdirs; /* our children */
81605 struct hlist_node d_alias; /* inode alias list */
81606-};
81607+} __randomize_layout;
81608
81609 /*
81610 * dentry->d_lock spinlock nesting subclasses:
81611diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
81612index 7925bf0..d5143d2 100644
81613--- a/include/linux/decompress/mm.h
81614+++ b/include/linux/decompress/mm.h
81615@@ -77,7 +77,7 @@ static void free(void *where)
81616 * warnings when not needed (indeed large_malloc / large_free are not
81617 * needed by inflate */
81618
81619-#define malloc(a) kmalloc(a, GFP_KERNEL)
81620+#define malloc(a) kmalloc((a), GFP_KERNEL)
81621 #define free(a) kfree(a)
81622
81623 #define large_malloc(a) vmalloc(a)
81624diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
81625index f1863dc..5c26074 100644
81626--- a/include/linux/devfreq.h
81627+++ b/include/linux/devfreq.h
81628@@ -114,7 +114,7 @@ struct devfreq_governor {
81629 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
81630 int (*event_handler)(struct devfreq *devfreq,
81631 unsigned int event, void *data);
81632-};
81633+} __do_const;
81634
81635 /**
81636 * struct devfreq - Device devfreq structure
81637diff --git a/include/linux/device.h b/include/linux/device.h
81638index af424ac..fd46ddf 100644
81639--- a/include/linux/device.h
81640+++ b/include/linux/device.h
81641@@ -310,7 +310,7 @@ struct subsys_interface {
81642 struct list_head node;
81643 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
81644 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
81645-};
81646+} __do_const;
81647
81648 int subsys_interface_register(struct subsys_interface *sif);
81649 void subsys_interface_unregister(struct subsys_interface *sif);
81650@@ -506,7 +506,7 @@ struct device_type {
81651 void (*release)(struct device *dev);
81652
81653 const struct dev_pm_ops *pm;
81654-};
81655+} __do_const;
81656
81657 /* interface for exporting device attributes */
81658 struct device_attribute {
81659@@ -516,11 +516,12 @@ struct device_attribute {
81660 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
81661 const char *buf, size_t count);
81662 };
81663+typedef struct device_attribute __no_const device_attribute_no_const;
81664
81665 struct dev_ext_attribute {
81666 struct device_attribute attr;
81667 void *var;
81668-};
81669+} __do_const;
81670
81671 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
81672 char *buf);
81673diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
81674index 931b709..89b2d89 100644
81675--- a/include/linux/dma-mapping.h
81676+++ b/include/linux/dma-mapping.h
81677@@ -60,7 +60,7 @@ struct dma_map_ops {
81678 u64 (*get_required_mask)(struct device *dev);
81679 #endif
81680 int is_phys;
81681-};
81682+} __do_const;
81683
81684 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
81685
81686diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
81687index d2c5cc7..d193394 100644
81688--- a/include/linux/dmaengine.h
81689+++ b/include/linux/dmaengine.h
81690@@ -1147,9 +1147,9 @@ struct dma_pinned_list {
81691 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
81692 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
81693
81694-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81695+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81696 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
81697-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81698+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81699 struct dma_pinned_list *pinned_list, struct page *page,
81700 unsigned int offset, size_t len);
81701
81702diff --git a/include/linux/efi.h b/include/linux/efi.h
81703index 41bbf8b..bd3a718 100644
81704--- a/include/linux/efi.h
81705+++ b/include/linux/efi.h
81706@@ -1027,6 +1027,7 @@ struct efivar_operations {
81707 efi_set_variable_t *set_variable;
81708 efi_query_variable_store_t *query_variable_store;
81709 };
81710+typedef struct efivar_operations __no_const efivar_operations_no_const;
81711
81712 struct efivars {
81713 /*
81714diff --git a/include/linux/elf.h b/include/linux/elf.h
81715index 67a5fa7..b817372 100644
81716--- a/include/linux/elf.h
81717+++ b/include/linux/elf.h
81718@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
81719 #define elf_note elf32_note
81720 #define elf_addr_t Elf32_Off
81721 #define Elf_Half Elf32_Half
81722+#define elf_dyn Elf32_Dyn
81723
81724 #else
81725
81726@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
81727 #define elf_note elf64_note
81728 #define elf_addr_t Elf64_Off
81729 #define Elf_Half Elf64_Half
81730+#define elf_dyn Elf64_Dyn
81731
81732 #endif
81733
81734diff --git a/include/linux/err.h b/include/linux/err.h
81735index a729120..6ede2c9 100644
81736--- a/include/linux/err.h
81737+++ b/include/linux/err.h
81738@@ -20,12 +20,12 @@
81739
81740 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
81741
81742-static inline void * __must_check ERR_PTR(long error)
81743+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
81744 {
81745 return (void *) error;
81746 }
81747
81748-static inline long __must_check PTR_ERR(__force const void *ptr)
81749+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
81750 {
81751 return (long) ptr;
81752 }
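ERR_PTR()/PTR_ERR() deliberately shuttle a negative errno through an unsigned pointer value, which is exactly the conversion that __intentional_overflow(-1) whitelists for the size_overflow plugin. The idiom the annotation covers, with a hypothetical lookup function and struct:

static struct foo *foo_lookup(int id)	/* struct foo is hypothetical */
{
	if (id < 0)
		return ERR_PTR(-EINVAL);	/* -22 wraps to 0xff...ea */
	return NULL;
}

/* caller side */
struct foo *f = foo_lookup(id);
if (IS_ERR(f))
	return PTR_ERR(f);			/* recovers -EINVAL */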
81753diff --git a/include/linux/extcon.h b/include/linux/extcon.h
81754index 36f49c4..a2a1f4c 100644
81755--- a/include/linux/extcon.h
81756+++ b/include/linux/extcon.h
81757@@ -135,7 +135,7 @@ struct extcon_dev {
81758 /* /sys/class/extcon/.../mutually_exclusive/... */
81759 struct attribute_group attr_g_muex;
81760 struct attribute **attrs_muex;
81761- struct device_attribute *d_attrs_muex;
81762+ device_attribute_no_const *d_attrs_muex;
81763 };
81764
81765 /**
81766diff --git a/include/linux/fb.h b/include/linux/fb.h
81767index b6bfda9..1f13487 100644
81768--- a/include/linux/fb.h
81769+++ b/include/linux/fb.h
81770@@ -305,7 +305,7 @@ struct fb_ops {
81771 /* called at KDB enter and leave time to prepare the console */
81772 int (*fb_debug_enter)(struct fb_info *info);
81773 int (*fb_debug_leave)(struct fb_info *info);
81774-};
81775+} __do_const;
81776
81777 #ifdef CONFIG_FB_TILEBLITTING
81778 #define FB_TILE_CURSOR_NONE 0
81779diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81780index 230f87b..1fd0485 100644
81781--- a/include/linux/fdtable.h
81782+++ b/include/linux/fdtable.h
81783@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81784 void put_files_struct(struct files_struct *fs);
81785 void reset_files_struct(struct files_struct *);
81786 int unshare_files(struct files_struct **);
81787-struct files_struct *dup_fd(struct files_struct *, int *);
81788+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81789 void do_close_on_exec(struct files_struct *);
81790 int iterate_fd(struct files_struct *, unsigned,
81791 int (*)(const void *, struct file *, unsigned),
81792diff --git a/include/linux/filter.h b/include/linux/filter.h
81793index a7e3c48..e568c8e 100644
81794--- a/include/linux/filter.h
81795+++ b/include/linux/filter.h
81796@@ -9,330 +9,28 @@
81797 #include <linux/workqueue.h>
81798 #include <uapi/linux/filter.h>
81799
81800-/* Internally used and optimized filter representation with extended
81801- * instruction set based on top of classic BPF.
81802- */
81803-
81804-/* instruction classes */
81805-#define BPF_ALU64 0x07 /* alu mode in double word width */
81806-
81807-/* ld/ldx fields */
81808-#define BPF_DW 0x18 /* double word */
81809-#define BPF_XADD 0xc0 /* exclusive add */
81810-
81811-/* alu/jmp fields */
81812-#define BPF_MOV 0xb0 /* mov reg to reg */
81813-#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
81814-
81815-/* change endianness of a register */
81816-#define BPF_END 0xd0 /* flags for endianness conversion: */
81817-#define BPF_TO_LE 0x00 /* convert to little-endian */
81818-#define BPF_TO_BE 0x08 /* convert to big-endian */
81819-#define BPF_FROM_LE BPF_TO_LE
81820-#define BPF_FROM_BE BPF_TO_BE
81821-
81822-#define BPF_JNE 0x50 /* jump != */
81823-#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
81824-#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
81825-#define BPF_CALL 0x80 /* function call */
81826-#define BPF_EXIT 0x90 /* function return */
81827-
81828-/* Register numbers */
81829-enum {
81830- BPF_REG_0 = 0,
81831- BPF_REG_1,
81832- BPF_REG_2,
81833- BPF_REG_3,
81834- BPF_REG_4,
81835- BPF_REG_5,
81836- BPF_REG_6,
81837- BPF_REG_7,
81838- BPF_REG_8,
81839- BPF_REG_9,
81840- BPF_REG_10,
81841- __MAX_BPF_REG,
81842-};
81843-
81844-/* BPF has 10 general purpose 64-bit registers and stack frame. */
81845-#define MAX_BPF_REG __MAX_BPF_REG
81846-
81847-/* ArgX, context and stack frame pointer register positions. Note,
81848- * Arg1, Arg2, Arg3, etc are used as argument mappings of function
81849- * calls in BPF_CALL instruction.
81850- */
81851-#define BPF_REG_ARG1 BPF_REG_1
81852-#define BPF_REG_ARG2 BPF_REG_2
81853-#define BPF_REG_ARG3 BPF_REG_3
81854-#define BPF_REG_ARG4 BPF_REG_4
81855-#define BPF_REG_ARG5 BPF_REG_5
81856-#define BPF_REG_CTX BPF_REG_6
81857-#define BPF_REG_FP BPF_REG_10
81858-
81859-/* Additional register mappings for converted user programs. */
81860-#define BPF_REG_A BPF_REG_0
81861-#define BPF_REG_X BPF_REG_7
81862-#define BPF_REG_TMP BPF_REG_8
81863-
81864-/* BPF program can access up to 512 bytes of stack space. */
81865-#define MAX_BPF_STACK 512
81866-
81867-/* Helper macros for filter block array initializers. */
81868-
81869-/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
81870-
81871-#define BPF_ALU64_REG(OP, DST, SRC) \
81872- ((struct sock_filter_int) { \
81873- .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
81874- .dst_reg = DST, \
81875- .src_reg = SRC, \
81876- .off = 0, \
81877- .imm = 0 })
81878-
81879-#define BPF_ALU32_REG(OP, DST, SRC) \
81880- ((struct sock_filter_int) { \
81881- .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
81882- .dst_reg = DST, \
81883- .src_reg = SRC, \
81884- .off = 0, \
81885- .imm = 0 })
81886-
81887-/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
81888-
81889-#define BPF_ALU64_IMM(OP, DST, IMM) \
81890- ((struct sock_filter_int) { \
81891- .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
81892- .dst_reg = DST, \
81893- .src_reg = 0, \
81894- .off = 0, \
81895- .imm = IMM })
81896-
81897-#define BPF_ALU32_IMM(OP, DST, IMM) \
81898- ((struct sock_filter_int) { \
81899- .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
81900- .dst_reg = DST, \
81901- .src_reg = 0, \
81902- .off = 0, \
81903- .imm = IMM })
81904-
81905-/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
81906-
81907-#define BPF_ENDIAN(TYPE, DST, LEN) \
81908- ((struct sock_filter_int) { \
81909- .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
81910- .dst_reg = DST, \
81911- .src_reg = 0, \
81912- .off = 0, \
81913- .imm = LEN })
81914-
81915-/* Short form of mov, dst_reg = src_reg */
81916-
81917-#define BPF_MOV64_REG(DST, SRC) \
81918- ((struct sock_filter_int) { \
81919- .code = BPF_ALU64 | BPF_MOV | BPF_X, \
81920- .dst_reg = DST, \
81921- .src_reg = SRC, \
81922- .off = 0, \
81923- .imm = 0 })
81924-
81925-#define BPF_MOV32_REG(DST, SRC) \
81926- ((struct sock_filter_int) { \
81927- .code = BPF_ALU | BPF_MOV | BPF_X, \
81928- .dst_reg = DST, \
81929- .src_reg = SRC, \
81930- .off = 0, \
81931- .imm = 0 })
81932-
81933-/* Short form of mov, dst_reg = imm32 */
81934-
81935-#define BPF_MOV64_IMM(DST, IMM) \
81936- ((struct sock_filter_int) { \
81937- .code = BPF_ALU64 | BPF_MOV | BPF_K, \
81938- .dst_reg = DST, \
81939- .src_reg = 0, \
81940- .off = 0, \
81941- .imm = IMM })
81942-
81943-#define BPF_MOV32_IMM(DST, IMM) \
81944- ((struct sock_filter_int) { \
81945- .code = BPF_ALU | BPF_MOV | BPF_K, \
81946- .dst_reg = DST, \
81947- .src_reg = 0, \
81948- .off = 0, \
81949- .imm = IMM })
81950-
81951-/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
81952-
81953-#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
81954- ((struct sock_filter_int) { \
81955- .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
81956- .dst_reg = DST, \
81957- .src_reg = SRC, \
81958- .off = 0, \
81959- .imm = IMM })
81960-
81961-#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
81962- ((struct sock_filter_int) { \
81963- .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
81964- .dst_reg = DST, \
81965- .src_reg = SRC, \
81966- .off = 0, \
81967- .imm = IMM })
81968-
81969-/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
81970-
81971-#define BPF_LD_ABS(SIZE, IMM) \
81972- ((struct sock_filter_int) { \
81973- .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
81974- .dst_reg = 0, \
81975- .src_reg = 0, \
81976- .off = 0, \
81977- .imm = IMM })
81978-
81979-/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
81980-
81981-#define BPF_LD_IND(SIZE, SRC, IMM) \
81982- ((struct sock_filter_int) { \
81983- .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
81984- .dst_reg = 0, \
81985- .src_reg = SRC, \
81986- .off = 0, \
81987- .imm = IMM })
81988-
81989-/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
81990-
81991-#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
81992- ((struct sock_filter_int) { \
81993- .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
81994- .dst_reg = DST, \
81995- .src_reg = SRC, \
81996- .off = OFF, \
81997- .imm = 0 })
81998-
81999-/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
82000-
82001-#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
82002- ((struct sock_filter_int) { \
82003- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
82004- .dst_reg = DST, \
82005- .src_reg = SRC, \
82006- .off = OFF, \
82007- .imm = 0 })
82008-
82009-/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
82010-
82011-#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
82012- ((struct sock_filter_int) { \
82013- .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
82014- .dst_reg = DST, \
82015- .src_reg = 0, \
82016- .off = OFF, \
82017- .imm = IMM })
82018-
82019-/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
82020-
82021-#define BPF_JMP_REG(OP, DST, SRC, OFF) \
82022- ((struct sock_filter_int) { \
82023- .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
82024- .dst_reg = DST, \
82025- .src_reg = SRC, \
82026- .off = OFF, \
82027- .imm = 0 })
82028-
82029-/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
82030-
82031-#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
82032- ((struct sock_filter_int) { \
82033- .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
82034- .dst_reg = DST, \
82035- .src_reg = 0, \
82036- .off = OFF, \
82037- .imm = IMM })
82038-
82039-/* Function call */
82040-
82041-#define BPF_EMIT_CALL(FUNC) \
82042- ((struct sock_filter_int) { \
82043- .code = BPF_JMP | BPF_CALL, \
82044- .dst_reg = 0, \
82045- .src_reg = 0, \
82046- .off = 0, \
82047- .imm = ((FUNC) - __bpf_call_base) })
82048-
82049-/* Raw code statement block */
82050-
82051-#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
82052- ((struct sock_filter_int) { \
82053- .code = CODE, \
82054- .dst_reg = DST, \
82055- .src_reg = SRC, \
82056- .off = OFF, \
82057- .imm = IMM })
82058-
82059-/* Program exit */
82060-
82061-#define BPF_EXIT_INSN() \
82062- ((struct sock_filter_int) { \
82063- .code = BPF_JMP | BPF_EXIT, \
82064- .dst_reg = 0, \
82065- .src_reg = 0, \
82066- .off = 0, \
82067- .imm = 0 })
82068-
82069-#define bytes_to_bpf_size(bytes) \
82070-({ \
82071- int bpf_size = -EINVAL; \
82072- \
82073- if (bytes == sizeof(u8)) \
82074- bpf_size = BPF_B; \
82075- else if (bytes == sizeof(u16)) \
82076- bpf_size = BPF_H; \
82077- else if (bytes == sizeof(u32)) \
82078- bpf_size = BPF_W; \
82079- else if (bytes == sizeof(u64)) \
82080- bpf_size = BPF_DW; \
82081- \
82082- bpf_size; \
82083-})
82084-
82085-/* Macro to invoke filter function. */
82086-#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
82087-
82088-struct sock_filter_int {
82089- __u8 code; /* opcode */
82090- __u8 dst_reg:4; /* dest register */
82091- __u8 src_reg:4; /* source register */
82092- __s16 off; /* signed offset */
82093- __s32 imm; /* signed immediate constant */
82094-};
82095-
82096 #ifdef CONFIG_COMPAT
82097-/* A struct sock_filter is architecture independent. */
82098+/*
82099+ * A struct sock_filter is architecture independent.
82100+ */
82101 struct compat_sock_fprog {
82102 u16 len;
82103- compat_uptr_t filter; /* struct sock_filter * */
82104+ compat_uptr_t filter; /* struct sock_filter * */
82105 };
82106 #endif
82107
82108-struct sock_fprog_kern {
82109- u16 len;
82110- struct sock_filter *filter;
82111-};
82112-
82113 struct sk_buff;
82114 struct sock;
82115-struct seccomp_data;
82116
82117-struct sk_filter {
82118+struct sk_filter
82119+{
82120 atomic_t refcnt;
82121- u32 jited:1, /* Is our filter JIT'ed? */
82122- len:31; /* Number of filter blocks */
82123- struct sock_fprog_kern *orig_prog; /* Original BPF program */
82124+ unsigned int len; /* Number of filter blocks */
82125 struct rcu_head rcu;
82126 unsigned int (*bpf_func)(const struct sk_buff *skb,
82127- const struct sock_filter_int *filter);
82128+ const struct sock_filter *filter);
82129 union {
82130- struct sock_filter insns[0];
82131- struct sock_filter_int insnsi[0];
82132+ struct sock_filter insns[0];
82133 struct work_struct work;
82134 };
82135 };
82136@@ -343,76 +41,25 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
82137 offsetof(struct sk_filter, insns[proglen]));
82138 }
82139
82140-#define sk_filter_proglen(fprog) \
82141- (fprog->len * sizeof(fprog->filter[0]))
82142-
82143-int sk_filter(struct sock *sk, struct sk_buff *skb);
82144-
82145-void sk_filter_select_runtime(struct sk_filter *fp);
82146-void sk_filter_free(struct sk_filter *fp);
82147-
82148-int sk_convert_filter(struct sock_filter *prog, int len,
82149- struct sock_filter_int *new_prog, int *new_len);
82150-
82151-int sk_unattached_filter_create(struct sk_filter **pfp,
82152- struct sock_fprog_kern *fprog);
82153-void sk_unattached_filter_destroy(struct sk_filter *fp);
82154-
82155-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
82156-int sk_detach_filter(struct sock *sk);
82157-
82158-int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
82159-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
82160- unsigned int len);
82161-
82162-void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
82163-void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
82164-
82165-u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
82166-void bpf_int_jit_compile(struct sk_filter *fp);
82167-
82168-#define BPF_ANC BIT(15)
82169-
82170-static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
82171-{
82172- BUG_ON(ftest->code & BPF_ANC);
82173-
82174- switch (ftest->code) {
82175- case BPF_LD | BPF_W | BPF_ABS:
82176- case BPF_LD | BPF_H | BPF_ABS:
82177- case BPF_LD | BPF_B | BPF_ABS:
82178-#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
82179- return BPF_ANC | SKF_AD_##CODE
82180- switch (ftest->k) {
82181- BPF_ANCILLARY(PROTOCOL);
82182- BPF_ANCILLARY(PKTTYPE);
82183- BPF_ANCILLARY(IFINDEX);
82184- BPF_ANCILLARY(NLATTR);
82185- BPF_ANCILLARY(NLATTR_NEST);
82186- BPF_ANCILLARY(MARK);
82187- BPF_ANCILLARY(QUEUE);
82188- BPF_ANCILLARY(HATYPE);
82189- BPF_ANCILLARY(RXHASH);
82190- BPF_ANCILLARY(CPU);
82191- BPF_ANCILLARY(ALU_XOR_X);
82192- BPF_ANCILLARY(VLAN_TAG);
82193- BPF_ANCILLARY(VLAN_TAG_PRESENT);
82194- BPF_ANCILLARY(PAY_OFFSET);
82195- BPF_ANCILLARY(RANDOM);
82196- }
82197- /* Fallthrough. */
82198- default:
82199- return ftest->code;
82200- }
82201-}
82202+extern int sk_filter(struct sock *sk, struct sk_buff *skb);
82203+extern unsigned int sk_run_filter(const struct sk_buff *skb,
82204+ const struct sock_filter *filter);
82205+extern int sk_unattached_filter_create(struct sk_filter **pfp,
82206+ struct sock_fprog *fprog);
82207+extern void sk_unattached_filter_destroy(struct sk_filter *fp);
82208+extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
82209+extern int sk_detach_filter(struct sock *sk);
82210+extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
82211+extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
82212+extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
82213
82214 #ifdef CONFIG_BPF_JIT
82215 #include <stdarg.h>
82216 #include <linux/linkage.h>
82217 #include <linux/printk.h>
82218
82219-void bpf_jit_compile(struct sk_filter *fp);
82220-void bpf_jit_free(struct sk_filter *fp);
82221+extern void bpf_jit_compile(struct sk_filter *fp);
82222+extern void bpf_jit_free(struct sk_filter *fp);
82223
82224 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
82225 u32 pass, void *image)
82226@@ -423,22 +70,90 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
82227 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
82228 16, 1, image, proglen, false);
82229 }
82230+#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
82231 #else
82232 #include <linux/slab.h>
82233-
82234 static inline void bpf_jit_compile(struct sk_filter *fp)
82235 {
82236 }
82237-
82238 static inline void bpf_jit_free(struct sk_filter *fp)
82239 {
82240 kfree(fp);
82241 }
82242-#endif /* CONFIG_BPF_JIT */
82243+#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
82244+#endif
82245
82246 static inline int bpf_tell_extensions(void)
82247 {
82248 return SKF_AD_MAX;
82249 }
82250
82251+enum {
82252+ BPF_S_RET_K = 1,
82253+ BPF_S_RET_A,
82254+ BPF_S_ALU_ADD_K,
82255+ BPF_S_ALU_ADD_X,
82256+ BPF_S_ALU_SUB_K,
82257+ BPF_S_ALU_SUB_X,
82258+ BPF_S_ALU_MUL_K,
82259+ BPF_S_ALU_MUL_X,
82260+ BPF_S_ALU_DIV_X,
82261+ BPF_S_ALU_MOD_K,
82262+ BPF_S_ALU_MOD_X,
82263+ BPF_S_ALU_AND_K,
82264+ BPF_S_ALU_AND_X,
82265+ BPF_S_ALU_OR_K,
82266+ BPF_S_ALU_OR_X,
82267+ BPF_S_ALU_XOR_K,
82268+ BPF_S_ALU_XOR_X,
82269+ BPF_S_ALU_LSH_K,
82270+ BPF_S_ALU_LSH_X,
82271+ BPF_S_ALU_RSH_K,
82272+ BPF_S_ALU_RSH_X,
82273+ BPF_S_ALU_NEG,
82274+ BPF_S_LD_W_ABS,
82275+ BPF_S_LD_H_ABS,
82276+ BPF_S_LD_B_ABS,
82277+ BPF_S_LD_W_LEN,
82278+ BPF_S_LD_W_IND,
82279+ BPF_S_LD_H_IND,
82280+ BPF_S_LD_B_IND,
82281+ BPF_S_LD_IMM,
82282+ BPF_S_LDX_W_LEN,
82283+ BPF_S_LDX_B_MSH,
82284+ BPF_S_LDX_IMM,
82285+ BPF_S_MISC_TAX,
82286+ BPF_S_MISC_TXA,
82287+ BPF_S_ALU_DIV_K,
82288+ BPF_S_LD_MEM,
82289+ BPF_S_LDX_MEM,
82290+ BPF_S_ST,
82291+ BPF_S_STX,
82292+ BPF_S_JMP_JA,
82293+ BPF_S_JMP_JEQ_K,
82294+ BPF_S_JMP_JEQ_X,
82295+ BPF_S_JMP_JGE_K,
82296+ BPF_S_JMP_JGE_X,
82297+ BPF_S_JMP_JGT_K,
82298+ BPF_S_JMP_JGT_X,
82299+ BPF_S_JMP_JSET_K,
82300+ BPF_S_JMP_JSET_X,
82301+ /* Ancillary data */
82302+ BPF_S_ANC_PROTOCOL,
82303+ BPF_S_ANC_PKTTYPE,
82304+ BPF_S_ANC_IFINDEX,
82305+ BPF_S_ANC_NLATTR,
82306+ BPF_S_ANC_NLATTR_NEST,
82307+ BPF_S_ANC_MARK,
82308+ BPF_S_ANC_QUEUE,
82309+ BPF_S_ANC_HATYPE,
82310+ BPF_S_ANC_RXHASH,
82311+ BPF_S_ANC_CPU,
82312+ BPF_S_ANC_ALU_XOR_X,
82313+ BPF_S_ANC_SECCOMP_LD_W,
82314+ BPF_S_ANC_VLAN_TAG,
82315+ BPF_S_ANC_VLAN_TAG_PRESENT,
82316+ BPF_S_ANC_PAY_OFFSET,
82317+};
82318+
82319 #endif /* __LINUX_FILTER_H__ */
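This hunk backs out the 3.16 internal eBPF representation (struct sock_filter_int, the BPF_* instruction-builder macros, sk_convert_filter() and the two-stage JIT hooks) and restores the classic interpreter: sk_filter holds raw sock_filter instructions again, SK_RUN_FILTER() dispatches to either the JIT image or sk_run_filter(), and the BPF_S_* decode enum returns. The classic programs this code consumes look like the accept-all filter below, shown for shape only (BPF_RET and BPF_K come from uapi/linux/filter.h):

struct sock_filter accept_all[] = {
	/* BPF_RET | BPF_K: return constant 0xffffffff -> keep whole packet */
	{ .code = BPF_RET | BPF_K, .jt = 0, .jf = 0, .k = 0xffffffff },
};

struct sock_fprog prog = {
	.len	= 1,		/* one instruction */
	.filter	= accept_all,
};

/* kernel side, per the declarations above (error handling elided):
 *	sk_unattached_filter_create(&fp, &prog);
 *	pass = SK_RUN_FILTER(fp, skb);
 */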
82320diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
82321index 8293262..2b3b8bd 100644
82322--- a/include/linux/frontswap.h
82323+++ b/include/linux/frontswap.h
82324@@ -11,7 +11,7 @@ struct frontswap_ops {
82325 int (*load)(unsigned, pgoff_t, struct page *);
82326 void (*invalidate_page)(unsigned, pgoff_t);
82327 void (*invalidate_area)(unsigned);
82328-};
82329+} __no_const;
82330
82331 extern bool frontswap_enabled;
82332 extern struct frontswap_ops *
82333diff --git a/include/linux/fs.h b/include/linux/fs.h
82334index e11d60c..901317a 100644
82335--- a/include/linux/fs.h
82336+++ b/include/linux/fs.h
82337@@ -401,7 +401,7 @@ struct address_space {
82338 spinlock_t private_lock; /* for use by the address_space */
82339 struct list_head private_list; /* ditto */
82340 void *private_data; /* ditto */
82341-} __attribute__((aligned(sizeof(long))));
82342+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
82343 /*
82344 * On most architectures that alignment is already the case; but
82345 * must be enforced here for CRIS, to let the least significant bit
82346@@ -444,7 +444,7 @@ struct block_device {
82347 int bd_fsfreeze_count;
82348 /* Mutex for freeze */
82349 struct mutex bd_fsfreeze_mutex;
82350-};
82351+} __randomize_layout;
82352
82353 /*
82354 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
82355@@ -588,7 +588,7 @@ struct inode {
82356 #endif
82357
82358 void *i_private; /* fs or device private pointer */
82359-};
82360+} __randomize_layout;
82361
82362 static inline int inode_unhashed(struct inode *inode)
82363 {
82364@@ -781,7 +781,7 @@ struct file {
82365 struct list_head f_tfile_llink;
82366 #endif /* #ifdef CONFIG_EPOLL */
82367 struct address_space *f_mapping;
82368-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
82369+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
82370
82371 struct file_handle {
82372 __u32 handle_bytes;
82373@@ -909,7 +909,7 @@ struct file_lock {
82374 int state; /* state of grant or error if -ve */
82375 } afs;
82376 } fl_u;
82377-};
82378+} __randomize_layout;
82379
82380 /* The following constant reflects the upper bound of the file/locking space */
82381 #ifndef OFFSET_MAX
82382@@ -1258,7 +1258,7 @@ struct super_block {
82383 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
82384 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
82385 struct rcu_head rcu;
82386-};
82387+} __randomize_layout;
82388
82389 extern struct timespec current_fs_time(struct super_block *sb);
82390
82391@@ -1484,7 +1484,8 @@ struct file_operations {
82392 long (*fallocate)(struct file *file, int mode, loff_t offset,
82393 loff_t len);
82394 int (*show_fdinfo)(struct seq_file *m, struct file *f);
82395-};
82396+} __do_const __randomize_layout;
82397+typedef struct file_operations __no_const file_operations_no_const;
82398
82399 struct inode_operations {
82400 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
82401@@ -2769,4 +2770,14 @@ static inline bool dir_relax(struct inode *inode)
82402 return !IS_DEADDIR(inode);
82403 }
82404
82405+static inline bool is_sidechannel_device(const struct inode *inode)
82406+{
82407+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
82408+ umode_t mode = inode->i_mode;
82409+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
82410+#else
82411+ return false;
82412+#endif
82413+}
82414+
82415 #endif /* _LINUX_FS_H */
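is_sidechannel_device() identifies device nodes that are world-readable or world-writable, i.e. ones whose access any local user can observe; the fsnotify hooks patched further below use it to skip access/modify events on such devices so atime/mtime updates cannot become a side channel. What the predicate computes for a typical node, assuming GRKERNSEC_DEVICE_SIDECHANNEL is enabled:

umode_t mode = S_IFCHR | 0666;			/* e.g. a /dev/ptmx-style node */
bool leaky = (S_ISCHR(mode) || S_ISBLK(mode)) &&
	     (mode & (S_IROTH | S_IWOTH));	/* true: events suppressed */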
82416diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
82417index 0efc3e6..fd23610 100644
82418--- a/include/linux/fs_struct.h
82419+++ b/include/linux/fs_struct.h
82420@@ -6,13 +6,13 @@
82421 #include <linux/seqlock.h>
82422
82423 struct fs_struct {
82424- int users;
82425+ atomic_t users;
82426 spinlock_t lock;
82427 seqcount_t seq;
82428 int umask;
82429 int in_exec;
82430 struct path root, pwd;
82431-};
82432+} __randomize_layout;
82433
82434 extern struct kmem_cache *fs_cachep;
82435
82436diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
82437index 7714849..a4a5c7a 100644
82438--- a/include/linux/fscache-cache.h
82439+++ b/include/linux/fscache-cache.h
82440@@ -113,7 +113,7 @@ struct fscache_operation {
82441 fscache_operation_release_t release;
82442 };
82443
82444-extern atomic_t fscache_op_debug_id;
82445+extern atomic_unchecked_t fscache_op_debug_id;
82446 extern void fscache_op_work_func(struct work_struct *work);
82447
82448 extern void fscache_enqueue_operation(struct fscache_operation *);
82449@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
82450 INIT_WORK(&op->work, fscache_op_work_func);
82451 atomic_set(&op->usage, 1);
82452 op->state = FSCACHE_OP_ST_INITIALISED;
82453- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
82454+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
82455 op->processor = processor;
82456 op->release = release;
82457 INIT_LIST_HEAD(&op->pend_link);
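fscache_op_debug_id is a pure ID generator, so it is converted to PaX's atomic_unchecked_t: under PAX_REFCOUNT, ordinary atomic_t increments saturate on overflow to stop reference-count exploits, while the _unchecked flavour opts out counters where wrapping is harmless. The same pattern for any statistics or ID counter; op_ids is hypothetical:

static atomic_unchecked_t op_ids = ATOMIC_INIT(0);

static unsigned int next_op_id(void)
{
	/* may wrap past INT_MAX without tripping REFCOUNT checks */
	return atomic_inc_return_unchecked(&op_ids);
}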
82458diff --git a/include/linux/fscache.h b/include/linux/fscache.h
82459index 115bb81..e7b812b 100644
82460--- a/include/linux/fscache.h
82461+++ b/include/linux/fscache.h
82462@@ -152,7 +152,7 @@ struct fscache_cookie_def {
82463 * - this is mandatory for any object that may have data
82464 */
82465 void (*now_uncached)(void *cookie_netfs_data);
82466-};
82467+} __do_const;
82468
82469 /*
82470 * fscache cached network filesystem type
82471diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
82472index 1c804b0..1432c2b 100644
82473--- a/include/linux/fsnotify.h
82474+++ b/include/linux/fsnotify.h
82475@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
82476 struct inode *inode = file_inode(file);
82477 __u32 mask = FS_ACCESS;
82478
82479+ if (is_sidechannel_device(inode))
82480+ return;
82481+
82482 if (S_ISDIR(inode->i_mode))
82483 mask |= FS_ISDIR;
82484
82485@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
82486 struct inode *inode = file_inode(file);
82487 __u32 mask = FS_MODIFY;
82488
82489+ if (is_sidechannel_device(inode))
82490+ return;
82491+
82492 if (S_ISDIR(inode->i_mode))
82493 mask |= FS_ISDIR;
82494
82495@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
82496 */
82497 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
82498 {
82499- return kstrdup(name, GFP_KERNEL);
82500+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
82501 }
82502
82503 /*
82504diff --git a/include/linux/genhd.h b/include/linux/genhd.h
82505index ec274e0..e678159 100644
82506--- a/include/linux/genhd.h
82507+++ b/include/linux/genhd.h
82508@@ -194,7 +194,7 @@ struct gendisk {
82509 struct kobject *slave_dir;
82510
82511 struct timer_rand_state *random;
82512- atomic_t sync_io; /* RAID */
82513+ atomic_unchecked_t sync_io; /* RAID */
82514 struct disk_events *ev;
82515 #ifdef CONFIG_BLK_DEV_INTEGRITY
82516 struct blk_integrity *integrity;
82517@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
82518 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
82519
82520 /* drivers/char/random.c */
82521-extern void add_disk_randomness(struct gendisk *disk);
82522+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
82523 extern void rand_initialize_disk(struct gendisk *disk);
82524
82525 static inline sector_t get_start_sect(struct block_device *bdev)
82526diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
82527index c0894dd..2fbf10c 100644
82528--- a/include/linux/genl_magic_func.h
82529+++ b/include/linux/genl_magic_func.h
82530@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
82531 },
82532
82533 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
82534-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
82535+static struct genl_ops ZZZ_genl_ops[] = {
82536 #include GENL_MAGIC_INCLUDE_FILE
82537 };
82538
82539diff --git a/include/linux/gfp.h b/include/linux/gfp.h
82540index 6eb1fb3..30fe7e4 100644
82541--- a/include/linux/gfp.h
82542+++ b/include/linux/gfp.h
82543@@ -34,6 +34,13 @@ struct vm_area_struct;
82544 #define ___GFP_NO_KSWAPD 0x400000u
82545 #define ___GFP_OTHER_NODE 0x800000u
82546 #define ___GFP_WRITE 0x1000000u
82547+
82548+#ifdef CONFIG_PAX_USERCOPY_SLABS
82549+#define ___GFP_USERCOPY 0x2000000u
82550+#else
82551+#define ___GFP_USERCOPY 0
82552+#endif
82553+
82554 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
82555
82556 /*
82557@@ -90,6 +97,7 @@ struct vm_area_struct;
82558 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
82559 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
82560 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
82561+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
82562
82563 /*
82564 * This may seem redundant, but it's a way of annotating false positives vs.
82565@@ -97,7 +105,7 @@ struct vm_area_struct;
82566 */
82567 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
82568
82569-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
82570+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
82571 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
82572
82573 /* This equals 0, but use constants in case they ever change */
82574@@ -155,6 +163,8 @@ struct vm_area_struct;
82575 /* 4GB DMA on some platforms */
82576 #define GFP_DMA32 __GFP_DMA32
82577
82578+#define GFP_USERCOPY __GFP_USERCOPY
82579+
82580 /* Convert GFP flags to their corresponding migrate type */
82581 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
82582 {
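__GFP_USERCOPY tags allocations whose contents are expected to cross the user/kernel boundary; with PAX_USERCOPY_SLABS those objects are segregated into dedicated slabs so copy_*_user() can be bounds-checked against them, and without it the flag is defined to 0 and compiles away. A hedged sketch of a caller; buf, ubuf, and len are hypothetical:

buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);
if (!buf)
	return -ENOMEM;
/* ... fill buf ... */
if (copy_to_user(ubuf, buf, len))
	err = -EFAULT;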
82583diff --git a/include/linux/gracl.h b/include/linux/gracl.h
82584new file mode 100644
82585index 0000000..edb2cb6
82586--- /dev/null
82587+++ b/include/linux/gracl.h
82588@@ -0,0 +1,340 @@
82589+#ifndef GR_ACL_H
82590+#define GR_ACL_H
82591+
82592+#include <linux/grdefs.h>
82593+#include <linux/resource.h>
82594+#include <linux/capability.h>
82595+#include <linux/dcache.h>
82596+#include <asm/resource.h>
82597+
82598+/* Major status information */
82599+
82600+#define GR_VERSION "grsecurity 3.0"
82601+#define GRSECURITY_VERSION 0x3000
82602+
82603+enum {
82604+ GR_SHUTDOWN = 0,
82605+ GR_ENABLE = 1,
82606+ GR_SPROLE = 2,
82607+ GR_OLDRELOAD = 3,
82608+ GR_SEGVMOD = 4,
82609+ GR_STATUS = 5,
82610+ GR_UNSPROLE = 6,
82611+ GR_PASSSET = 7,
82612+ GR_SPROLEPAM = 8,
82613+ GR_RELOAD = 9,
82614+};
82615+
82616+/* Password setup definitions
82617+ * kernel/grhash.c */
82618+enum {
82619+ GR_PW_LEN = 128,
82620+ GR_SALT_LEN = 16,
82621+ GR_SHA_LEN = 32,
82622+};
82623+
82624+enum {
82625+ GR_SPROLE_LEN = 64,
82626+};
82627+
82628+enum {
82629+ GR_NO_GLOB = 0,
82630+ GR_REG_GLOB,
82631+ GR_CREATE_GLOB
82632+};
82633+
82634+#define GR_NLIMITS 32
82635+
82636+/* Begin Data Structures */
82637+
82638+struct sprole_pw {
82639+ unsigned char *rolename;
82640+ unsigned char salt[GR_SALT_LEN];
82641+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
82642+};
82643+
82644+struct name_entry {
82645+ __u32 key;
82646+ ino_t inode;
82647+ dev_t device;
82648+ char *name;
82649+ __u16 len;
82650+ __u8 deleted;
82651+ struct name_entry *prev;
82652+ struct name_entry *next;
82653+};
82654+
82655+struct inodev_entry {
82656+ struct name_entry *nentry;
82657+ struct inodev_entry *prev;
82658+ struct inodev_entry *next;
82659+};
82660+
82661+struct acl_role_db {
82662+ struct acl_role_label **r_hash;
82663+ __u32 r_size;
82664+};
82665+
82666+struct inodev_db {
82667+ struct inodev_entry **i_hash;
82668+ __u32 i_size;
82669+};
82670+
82671+struct name_db {
82672+ struct name_entry **n_hash;
82673+ __u32 n_size;
82674+};
82675+
82676+struct crash_uid {
82677+ uid_t uid;
82678+ unsigned long expires;
82679+};
82680+
82681+struct gr_hash_struct {
82682+ void **table;
82683+ void **nametable;
82684+ void *first;
82685+ __u32 table_size;
82686+ __u32 used_size;
82687+ int type;
82688+};
82689+
82690+/* Userspace Grsecurity ACL data structures */
82691+
82692+struct acl_subject_label {
82693+ char *filename;
82694+ ino_t inode;
82695+ dev_t device;
82696+ __u32 mode;
82697+ kernel_cap_t cap_mask;
82698+ kernel_cap_t cap_lower;
82699+ kernel_cap_t cap_invert_audit;
82700+
82701+ struct rlimit res[GR_NLIMITS];
82702+ __u32 resmask;
82703+
82704+ __u8 user_trans_type;
82705+ __u8 group_trans_type;
82706+ uid_t *user_transitions;
82707+ gid_t *group_transitions;
82708+ __u16 user_trans_num;
82709+ __u16 group_trans_num;
82710+
82711+ __u32 sock_families[2];
82712+ __u32 ip_proto[8];
82713+ __u32 ip_type;
82714+ struct acl_ip_label **ips;
82715+ __u32 ip_num;
82716+ __u32 inaddr_any_override;
82717+
82718+ __u32 crashes;
82719+ unsigned long expires;
82720+
82721+ struct acl_subject_label *parent_subject;
82722+ struct gr_hash_struct *hash;
82723+ struct acl_subject_label *prev;
82724+ struct acl_subject_label *next;
82725+
82726+ struct acl_object_label **obj_hash;
82727+ __u32 obj_hash_size;
82728+ __u16 pax_flags;
82729+};
82730+
82731+struct role_allowed_ip {
82732+ __u32 addr;
82733+ __u32 netmask;
82734+
82735+ struct role_allowed_ip *prev;
82736+ struct role_allowed_ip *next;
82737+};
82738+
82739+struct role_transition {
82740+ char *rolename;
82741+
82742+ struct role_transition *prev;
82743+ struct role_transition *next;
82744+};
82745+
82746+struct acl_role_label {
82747+ char *rolename;
82748+ uid_t uidgid;
82749+ __u16 roletype;
82750+
82751+ __u16 auth_attempts;
82752+ unsigned long expires;
82753+
82754+ struct acl_subject_label *root_label;
82755+ struct gr_hash_struct *hash;
82756+
82757+ struct acl_role_label *prev;
82758+ struct acl_role_label *next;
82759+
82760+ struct role_transition *transitions;
82761+ struct role_allowed_ip *allowed_ips;
82762+ uid_t *domain_children;
82763+ __u16 domain_child_num;
82764+
82765+ umode_t umask;
82766+
82767+ struct acl_subject_label **subj_hash;
82768+ __u32 subj_hash_size;
82769+};
82770+
82771+struct user_acl_role_db {
82772+ struct acl_role_label **r_table;
82773+ __u32 num_pointers; /* Number of allocations to track */
82774+ __u32 num_roles; /* Number of roles */
82775+ __u32 num_domain_children; /* Number of domain children */
82776+ __u32 num_subjects; /* Number of subjects */
82777+ __u32 num_objects; /* Number of objects */
82778+};
82779+
82780+struct acl_object_label {
82781+ char *filename;
82782+ ino_t inode;
82783+ dev_t device;
82784+ __u32 mode;
82785+
82786+ struct acl_subject_label *nested;
82787+ struct acl_object_label *globbed;
82788+
82789+ /* next two structures not used */
82790+
82791+ struct acl_object_label *prev;
82792+ struct acl_object_label *next;
82793+};
82794+
82795+struct acl_ip_label {
82796+ char *iface;
82797+ __u32 addr;
82798+ __u32 netmask;
82799+ __u16 low, high;
82800+ __u8 mode;
82801+ __u32 type;
82802+ __u32 proto[8];
82803+
82804+ /* next two structures not used */
82805+
82806+ struct acl_ip_label *prev;
82807+ struct acl_ip_label *next;
82808+};
82809+
82810+struct gr_arg {
82811+ struct user_acl_role_db role_db;
82812+ unsigned char pw[GR_PW_LEN];
82813+ unsigned char salt[GR_SALT_LEN];
82814+ unsigned char sum[GR_SHA_LEN];
82815+ unsigned char sp_role[GR_SPROLE_LEN];
82816+ struct sprole_pw *sprole_pws;
82817+ dev_t segv_device;
82818+ ino_t segv_inode;
82819+ uid_t segv_uid;
82820+ __u16 num_sprole_pws;
82821+ __u16 mode;
82822+};
82823+
82824+struct gr_arg_wrapper {
82825+ struct gr_arg *arg;
82826+ __u32 version;
82827+ __u32 size;
82828+};
82829+
82830+struct subject_map {
82831+ struct acl_subject_label *user;
82832+ struct acl_subject_label *kernel;
82833+ struct subject_map *prev;
82834+ struct subject_map *next;
82835+};
82836+
82837+struct acl_subj_map_db {
82838+ struct subject_map **s_hash;
82839+ __u32 s_size;
82840+};
82841+
82842+struct gr_policy_state {
82843+ struct sprole_pw **acl_special_roles;
82844+ __u16 num_sprole_pws;
82845+ struct acl_role_label *kernel_role;
82846+ struct acl_role_label *role_list;
82847+ struct acl_role_label *default_role;
82848+ struct acl_role_db acl_role_set;
82849+ struct acl_subj_map_db subj_map_set;
82850+ struct name_db name_set;
82851+ struct inodev_db inodev_set;
82852+};
82853+
82854+struct gr_alloc_state {
82855+ unsigned long alloc_stack_next;
82856+ unsigned long alloc_stack_size;
82857+ void **alloc_stack;
82858+};
82859+
82860+struct gr_reload_state {
82861+ struct gr_policy_state oldpolicy;
82862+ struct gr_alloc_state oldalloc;
82863+ struct gr_policy_state newpolicy;
82864+ struct gr_alloc_state newalloc;
82865+ struct gr_policy_state *oldpolicy_ptr;
82866+ struct gr_alloc_state *oldalloc_ptr;
82867+ unsigned char oldmode;
82868+};
82869+
82870+/* End Data Structures Section */
82871+
82872+/* Hash functions generated by empirical testing by Brad Spengler.
82873+ They make good use of the low bits of the inode: generally 0-1 loop
82874+ iterations for a successful match, 0-3 for an unsuccessful one.
82875+ Shift/add algorithm with modulus of table size and an XOR. */
82876+
82877+static __inline__ unsigned int
82878+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
82879+{
82880+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
82881+}
82882+
82883+static __inline__ unsigned int
82884+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
82885+{
82886+ return ((const unsigned long)userp % sz);
82887+}
82888+
82889+static __inline__ unsigned int
82890+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
82891+{
82892+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
82893+}
82894+
82895+static __inline__ unsigned int
82896+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
82897+{
82898+ return full_name_hash((const unsigned char *)name, len) % sz;
82899+}
82900+
82901+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
82902+ subj = NULL; \
82903+ iter = 0; \
82904+ while (iter < role->subj_hash_size) { \
82905+ if (subj == NULL) \
82906+ subj = role->subj_hash[iter]; \
82907+ if (subj == NULL) { \
82908+ iter++; \
82909+ continue; \
82910+ }
82911+
82912+#define FOR_EACH_SUBJECT_END(subj,iter) \
82913+ subj = subj->next; \
82914+ if (subj == NULL) \
82915+ iter++; \
82916+ }
82917+
82918+
82919+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
82920+ subj = role->hash->first; \
82921+ while (subj != NULL) {
82922+
82923+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
82924+ subj = subj->next; \
82925+ }
82926+
82927+#endif
82928+
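The tables declared in gracl.h above are open hash tables chained through the structures' own prev/next pointers and keyed by gr_rhash/gr_shash/gr_fhash/gr_nhash. A lookup consistent with those definitions might look like the sketch below; lookup_obj() is hypothetical and assumes, as elsewhere in grsecurity, that bucket chains are walked via ->prev:

static struct acl_object_label *
lookup_obj(const struct acl_subject_label *subj, ino_t ino, dev_t dev)
{
	unsigned int idx = gr_fhash(ino, dev, subj->obj_hash_size);
	struct acl_object_label *obj = subj->obj_hash[idx];

	while (obj && (obj->inode != ino || obj->device != dev))
		obj = obj->prev;		/* follow the bucket chain */

	return obj;				/* NULL if no match */
}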
82929diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
82930new file mode 100644
82931index 0000000..33ebd1f
82932--- /dev/null
82933+++ b/include/linux/gracl_compat.h
82934@@ -0,0 +1,156 @@
82935+#ifndef GR_ACL_COMPAT_H
82936+#define GR_ACL_COMPAT_H
82937+
82938+#include <linux/resource.h>
82939+#include <asm/resource.h>
82940+
82941+struct sprole_pw_compat {
82942+ compat_uptr_t rolename;
82943+ unsigned char salt[GR_SALT_LEN];
82944+ unsigned char sum[GR_SHA_LEN];
82945+};
82946+
82947+struct gr_hash_struct_compat {
82948+ compat_uptr_t table;
82949+ compat_uptr_t nametable;
82950+ compat_uptr_t first;
82951+ __u32 table_size;
82952+ __u32 used_size;
82953+ int type;
82954+};
82955+
82956+struct acl_subject_label_compat {
82957+ compat_uptr_t filename;
82958+ compat_ino_t inode;
82959+ __u32 device;
82960+ __u32 mode;
82961+ kernel_cap_t cap_mask;
82962+ kernel_cap_t cap_lower;
82963+ kernel_cap_t cap_invert_audit;
82964+
82965+ struct compat_rlimit res[GR_NLIMITS];
82966+ __u32 resmask;
82967+
82968+ __u8 user_trans_type;
82969+ __u8 group_trans_type;
82970+ compat_uptr_t user_transitions;
82971+ compat_uptr_t group_transitions;
82972+ __u16 user_trans_num;
82973+ __u16 group_trans_num;
82974+
82975+ __u32 sock_families[2];
82976+ __u32 ip_proto[8];
82977+ __u32 ip_type;
82978+ compat_uptr_t ips;
82979+ __u32 ip_num;
82980+ __u32 inaddr_any_override;
82981+
82982+ __u32 crashes;
82983+ compat_ulong_t expires;
82984+
82985+ compat_uptr_t parent_subject;
82986+ compat_uptr_t hash;
82987+ compat_uptr_t prev;
82988+ compat_uptr_t next;
82989+
82990+ compat_uptr_t obj_hash;
82991+ __u32 obj_hash_size;
82992+ __u16 pax_flags;
82993+};
82994+
82995+struct role_allowed_ip_compat {
82996+ __u32 addr;
82997+ __u32 netmask;
82998+
82999+ compat_uptr_t prev;
83000+ compat_uptr_t next;
83001+};
83002+
83003+struct role_transition_compat {
83004+ compat_uptr_t rolename;
83005+
83006+ compat_uptr_t prev;
83007+ compat_uptr_t next;
83008+};
83009+
83010+struct acl_role_label_compat {
83011+ compat_uptr_t rolename;
83012+ uid_t uidgid;
83013+ __u16 roletype;
83014+
83015+ __u16 auth_attempts;
83016+ compat_ulong_t expires;
83017+
83018+ compat_uptr_t root_label;
83019+ compat_uptr_t hash;
83020+
83021+ compat_uptr_t prev;
83022+ compat_uptr_t next;
83023+
83024+ compat_uptr_t transitions;
83025+ compat_uptr_t allowed_ips;
83026+ compat_uptr_t domain_children;
83027+ __u16 domain_child_num;
83028+
83029+ umode_t umask;
83030+
83031+ compat_uptr_t subj_hash;
83032+ __u32 subj_hash_size;
83033+};
83034+
83035+struct user_acl_role_db_compat {
83036+ compat_uptr_t r_table;
83037+ __u32 num_pointers;
83038+ __u32 num_roles;
83039+ __u32 num_domain_children;
83040+ __u32 num_subjects;
83041+ __u32 num_objects;
83042+};
83043+
83044+struct acl_object_label_compat {
83045+ compat_uptr_t filename;
83046+ compat_ino_t inode;
83047+ __u32 device;
83048+ __u32 mode;
83049+
83050+ compat_uptr_t nested;
83051+ compat_uptr_t globbed;
83052+
83053+ compat_uptr_t prev;
83054+ compat_uptr_t next;
83055+};
83056+
83057+struct acl_ip_label_compat {
83058+ compat_uptr_t iface;
83059+ __u32 addr;
83060+ __u32 netmask;
83061+ __u16 low, high;
83062+ __u8 mode;
83063+ __u32 type;
83064+ __u32 proto[8];
83065+
83066+ compat_uptr_t prev;
83067+ compat_uptr_t next;
83068+};
83069+
83070+struct gr_arg_compat {
83071+ struct user_acl_role_db_compat role_db;
83072+ unsigned char pw[GR_PW_LEN];
83073+ unsigned char salt[GR_SALT_LEN];
83074+ unsigned char sum[GR_SHA_LEN];
83075+ unsigned char sp_role[GR_SPROLE_LEN];
83076+ compat_uptr_t sprole_pws;
83077+ __u32 segv_device;
83078+ compat_ino_t segv_inode;
83079+ uid_t segv_uid;
83080+ __u16 num_sprole_pws;
83081+ __u16 mode;
83082+};
83083+
83084+struct gr_arg_wrapper_compat {
83085+ compat_uptr_t arg;
83086+ __u32 version;
83087+ __u32 size;
83088+};
83089+
83090+#endif
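
Every struct in this header mirrors a native gracl.h layout with pointers narrowed to compat_uptr_t and longs to compat_ulong_t, which is what lets a 64-bit kernel parse a policy uploaded by a 32-bit gradm. A hedged sketch of the translation step this layout implies; the function name and body below are illustrative, not quoted from the patch:

/* Sketch only: read the 32-bit wrapper, then widen its pointer
 * member with compat_ptr() before handing off to the normal
 * policy-parsing path. */
static int copy_gr_arg_wrapper_compat(const void __user *buf,
				      struct gr_arg_wrapper *uwrap)
{
	struct gr_arg_wrapper_compat wrapcompat;

	if (copy_from_user(&wrapcompat, buf, sizeof(wrapcompat)))
		return -EFAULT;

	uwrap->arg = compat_ptr(wrapcompat.arg);  /* compat_uptr_t -> native pointer */
	uwrap->version = wrapcompat.version;
	uwrap->size = wrapcompat.size;
	return 0;
}
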
83091diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
83092new file mode 100644
83093index 0000000..323ecf2
83094--- /dev/null
83095+++ b/include/linux/gralloc.h
83096@@ -0,0 +1,9 @@
83097+#ifndef __GRALLOC_H
83098+#define __GRALLOC_H
83099+
83100+void acl_free_all(void);
83101+int acl_alloc_stack_init(unsigned long size);
83102+void *acl_alloc(unsigned long len);
83103+void *acl_alloc_num(unsigned long num, unsigned long len);
83104+
83105+#endif
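
gralloc.h is a deliberately tiny interface: every pointer handed out is remembered so the whole in-kernel policy can be torn down with one acl_free_all() sweep, matching the alloc_stack bookkeeping in struct gr_alloc_state above. A user-space analogue of that contract, with the internals guessed from those field names:

#include <stdlib.h>

static void **alloc_stack;
static unsigned long alloc_stack_next, alloc_stack_size;

int acl_alloc_stack_init(unsigned long size)
{
	alloc_stack = calloc(size, sizeof(void *));
	if (!alloc_stack)
		return 0;
	alloc_stack_size = size;
	alloc_stack_next = 0;
	return 1;
}

void *acl_alloc(unsigned long len)
{
	void *p;

	if (alloc_stack_next >= alloc_stack_size)
		return NULL;                          /* pointer stack is full */
	p = calloc(1, len);
	if (p)
		alloc_stack[alloc_stack_next++] = p;  /* remember for acl_free_all() */
	return p;
}

void acl_free_all(void)
{
	while (alloc_stack_next)
		free(alloc_stack[--alloc_stack_next]);
	free(alloc_stack);
	alloc_stack = NULL;
	alloc_stack_size = 0;
}

acl_alloc_num(num, len) is presumably the same allocation with an overflow-checked num * len; this sketch omits it.
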
83106diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
83107new file mode 100644
83108index 0000000..be66033
83109--- /dev/null
83110+++ b/include/linux/grdefs.h
83111@@ -0,0 +1,140 @@
83112+#ifndef GRDEFS_H
83113+#define GRDEFS_H
83114+
83115+/* Begin grsecurity status declarations */
83116+
83117+enum {
83118+ GR_READY = 0x01,
83119+ GR_STATUS_INIT = 0x00 // disabled state
83120+};
83121+
83122+/* Begin ACL declarations */
83123+
83124+/* Role flags */
83125+
83126+enum {
83127+ GR_ROLE_USER = 0x0001,
83128+ GR_ROLE_GROUP = 0x0002,
83129+ GR_ROLE_DEFAULT = 0x0004,
83130+ GR_ROLE_SPECIAL = 0x0008,
83131+ GR_ROLE_AUTH = 0x0010,
83132+ GR_ROLE_NOPW = 0x0020,
83133+ GR_ROLE_GOD = 0x0040,
83134+ GR_ROLE_LEARN = 0x0080,
83135+ GR_ROLE_TPE = 0x0100,
83136+ GR_ROLE_DOMAIN = 0x0200,
83137+ GR_ROLE_PAM = 0x0400,
83138+ GR_ROLE_PERSIST = 0x0800
83139+};
83140+
83141+/* ACL Subject and Object mode flags */
83142+enum {
83143+ GR_DELETED = 0x80000000
83144+};
83145+
83146+/* ACL Object-only mode flags */
83147+enum {
83148+ GR_READ = 0x00000001,
83149+ GR_APPEND = 0x00000002,
83150+ GR_WRITE = 0x00000004,
83151+ GR_EXEC = 0x00000008,
83152+ GR_FIND = 0x00000010,
83153+ GR_INHERIT = 0x00000020,
83154+ GR_SETID = 0x00000040,
83155+ GR_CREATE = 0x00000080,
83156+ GR_DELETE = 0x00000100,
83157+ GR_LINK = 0x00000200,
83158+ GR_AUDIT_READ = 0x00000400,
83159+ GR_AUDIT_APPEND = 0x00000800,
83160+ GR_AUDIT_WRITE = 0x00001000,
83161+ GR_AUDIT_EXEC = 0x00002000,
83162+ GR_AUDIT_FIND = 0x00004000,
83163+ GR_AUDIT_INHERIT= 0x00008000,
83164+ GR_AUDIT_SETID = 0x00010000,
83165+ GR_AUDIT_CREATE = 0x00020000,
83166+ GR_AUDIT_DELETE = 0x00040000,
83167+ GR_AUDIT_LINK = 0x00080000,
83168+ GR_PTRACERD = 0x00100000,
83169+ GR_NOPTRACE = 0x00200000,
83170+ GR_SUPPRESS = 0x00400000,
83171+ GR_NOLEARN = 0x00800000,
83172+ GR_INIT_TRANSFER= 0x01000000
83173+};
83174+
83175+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
83176+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
83177+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
83178+
83179+/* ACL subject-only mode flags */
83180+enum {
83181+ GR_KILL = 0x00000001,
83182+ GR_VIEW = 0x00000002,
83183+ GR_PROTECTED = 0x00000004,
83184+ GR_LEARN = 0x00000008,
83185+ GR_OVERRIDE = 0x00000010,
83186+ /* just a placeholder, this mode is only used in userspace */
83187+ GR_DUMMY = 0x00000020,
83188+ GR_PROTSHM = 0x00000040,
83189+ GR_KILLPROC = 0x00000080,
83190+ GR_KILLIPPROC = 0x00000100,
83191+ /* just a placeholder, this mode is only used in userspace */
83192+ GR_NOTROJAN = 0x00000200,
83193+ GR_PROTPROCFD = 0x00000400,
83194+ GR_PROCACCT = 0x00000800,
83195+ GR_RELAXPTRACE = 0x00001000,
83196+ //GR_NESTED = 0x00002000,
83197+ GR_INHERITLEARN = 0x00004000,
83198+ GR_PROCFIND = 0x00008000,
83199+ GR_POVERRIDE = 0x00010000,
83200+ GR_KERNELAUTH = 0x00020000,
83201+ GR_ATSECURE = 0x00040000,
83202+ GR_SHMEXEC = 0x00080000
83203+};
83204+
83205+enum {
83206+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
83207+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
83208+ GR_PAX_ENABLE_MPROTECT = 0x0004,
83209+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
83210+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
83211+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
83212+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
83213+ GR_PAX_DISABLE_MPROTECT = 0x0400,
83214+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
83215+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
83216+};
83217+
83218+enum {
83219+ GR_ID_USER = 0x01,
83220+ GR_ID_GROUP = 0x02,
83221+};
83222+
83223+enum {
83224+ GR_ID_ALLOW = 0x01,
83225+ GR_ID_DENY = 0x02,
83226+};
83227+
83228+#define GR_CRASH_RES 31
83229+#define GR_UIDTABLE_MAX 500
83230+
83231+/* begin resource learning section */
83232+enum {
83233+ GR_RLIM_CPU_BUMP = 60,
83234+ GR_RLIM_FSIZE_BUMP = 50000,
83235+ GR_RLIM_DATA_BUMP = 10000,
83236+ GR_RLIM_STACK_BUMP = 1000,
83237+ GR_RLIM_CORE_BUMP = 10000,
83238+ GR_RLIM_RSS_BUMP = 500000,
83239+ GR_RLIM_NPROC_BUMP = 1,
83240+ GR_RLIM_NOFILE_BUMP = 5,
83241+ GR_RLIM_MEMLOCK_BUMP = 50000,
83242+ GR_RLIM_AS_BUMP = 500000,
83243+ GR_RLIM_LOCKS_BUMP = 2,
83244+ GR_RLIM_SIGPENDING_BUMP = 5,
83245+ GR_RLIM_MSGQUEUE_BUMP = 10000,
83246+ GR_RLIM_NICE_BUMP = 1,
83247+ GR_RLIM_RTPRIO_BUMP = 1,
83248+ GR_RLIM_RTTIME_BUMP = 1000000
83249+};
83250+
83251+#endif
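
Note the deliberate bit layout in the object-mode enum: each GR_AUDIT_* flag sits exactly ten bits above its base flag (GR_READ is 0x00000001 and GR_AUDIT_READ is 0x00000400, through GR_LINK at 0x00000200 and GR_AUDIT_LINK at 0x00080000). That spacing makes the to_gr_audit() conversion declared in grinternal.h expressible as a one-liner; a plausible body, sketched here rather than quoted from the patch:

/* Sketch: requested-access bits map to their audit counterparts by a
 * 10-bit shift, masked to the audit flags that actually exist. */
static inline __u32 to_gr_audit(const __u32 reqmode)
{
	return (reqmode << 10) & GR_AUDITS;
}
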
83252diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
83253new file mode 100644
83254index 0000000..d25522e
83255--- /dev/null
83256+++ b/include/linux/grinternal.h
83257@@ -0,0 +1,229 @@
83258+#ifndef __GRINTERNAL_H
83259+#define __GRINTERNAL_H
83260+
83261+#ifdef CONFIG_GRKERNSEC
83262+
83263+#include <linux/fs.h>
83264+#include <linux/mnt_namespace.h>
83265+#include <linux/nsproxy.h>
83266+#include <linux/gracl.h>
83267+#include <linux/grdefs.h>
83268+#include <linux/grmsg.h>
83269+
83270+void gr_add_learn_entry(const char *fmt, ...)
83271+ __attribute__ ((format (printf, 1, 2)));
83272+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
83273+ const struct vfsmount *mnt);
83274+__u32 gr_check_create(const struct dentry *new_dentry,
83275+ const struct dentry *parent,
83276+ const struct vfsmount *mnt, const __u32 mode);
83277+int gr_check_protected_task(const struct task_struct *task);
83278+__u32 to_gr_audit(const __u32 reqmode);
83279+int gr_set_acls(const int type);
83280+int gr_acl_is_enabled(void);
83281+char gr_roletype_to_char(void);
83282+
83283+void gr_handle_alertkill(struct task_struct *task);
83284+char *gr_to_filename(const struct dentry *dentry,
83285+ const struct vfsmount *mnt);
83286+char *gr_to_filename1(const struct dentry *dentry,
83287+ const struct vfsmount *mnt);
83288+char *gr_to_filename2(const struct dentry *dentry,
83289+ const struct vfsmount *mnt);
83290+char *gr_to_filename3(const struct dentry *dentry,
83291+ const struct vfsmount *mnt);
83292+
83293+extern int grsec_enable_ptrace_readexec;
83294+extern int grsec_enable_harden_ptrace;
83295+extern int grsec_enable_link;
83296+extern int grsec_enable_fifo;
83297+extern int grsec_enable_execve;
83298+extern int grsec_enable_shm;
83299+extern int grsec_enable_execlog;
83300+extern int grsec_enable_signal;
83301+extern int grsec_enable_audit_ptrace;
83302+extern int grsec_enable_forkfail;
83303+extern int grsec_enable_time;
83304+extern int grsec_enable_rofs;
83305+extern int grsec_deny_new_usb;
83306+extern int grsec_enable_chroot_shmat;
83307+extern int grsec_enable_chroot_mount;
83308+extern int grsec_enable_chroot_double;
83309+extern int grsec_enable_chroot_pivot;
83310+extern int grsec_enable_chroot_chdir;
83311+extern int grsec_enable_chroot_chmod;
83312+extern int grsec_enable_chroot_mknod;
83313+extern int grsec_enable_chroot_fchdir;
83314+extern int grsec_enable_chroot_nice;
83315+extern int grsec_enable_chroot_execlog;
83316+extern int grsec_enable_chroot_caps;
83317+extern int grsec_enable_chroot_sysctl;
83318+extern int grsec_enable_chroot_unix;
83319+extern int grsec_enable_symlinkown;
83320+extern kgid_t grsec_symlinkown_gid;
83321+extern int grsec_enable_tpe;
83322+extern kgid_t grsec_tpe_gid;
83323+extern int grsec_enable_tpe_all;
83324+extern int grsec_enable_tpe_invert;
83325+extern int grsec_enable_socket_all;
83326+extern kgid_t grsec_socket_all_gid;
83327+extern int grsec_enable_socket_client;
83328+extern kgid_t grsec_socket_client_gid;
83329+extern int grsec_enable_socket_server;
83330+extern kgid_t grsec_socket_server_gid;
83331+extern kgid_t grsec_audit_gid;
83332+extern int grsec_enable_group;
83333+extern int grsec_enable_log_rwxmaps;
83334+extern int grsec_enable_mount;
83335+extern int grsec_enable_chdir;
83336+extern int grsec_resource_logging;
83337+extern int grsec_enable_blackhole;
83338+extern int grsec_lastack_retries;
83339+extern int grsec_enable_brute;
83340+extern int grsec_enable_harden_ipc;
83341+extern int grsec_lock;
83342+
83343+extern spinlock_t grsec_alert_lock;
83344+extern unsigned long grsec_alert_wtime;
83345+extern unsigned long grsec_alert_fyet;
83346+
83347+extern spinlock_t grsec_audit_lock;
83348+
83349+extern rwlock_t grsec_exec_file_lock;
83350+
83351+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
83352+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
83353+ (tsk)->exec_file->f_path.mnt) : "/")
83354+
83355+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
83356+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
83357+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83358+
83359+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
83360+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
83361+ (tsk)->exec_file->f_path.mnt) : "/")
83362+
83363+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
83364+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
83365+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83366+
83367+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
83368+
83369+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
83370+
83371+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
83372+{
83373+ if (file1 && file2) {
83374+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
83375+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
83376+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
83377+ return true;
83378+ }
83379+
83380+ return false;
83381+}
83382+
83383+#define GR_CHROOT_CAPS {{ \
83384+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
83385+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
83386+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
83387+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
83388+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
83389+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
83390+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
83391+
83392+#define security_learn(normal_msg,args...) \
83393+({ \
83394+ read_lock(&grsec_exec_file_lock); \
83395+ gr_add_learn_entry(normal_msg "\n", ## args); \
83396+ read_unlock(&grsec_exec_file_lock); \
83397+})
83398+
83399+enum {
83400+ GR_DO_AUDIT,
83401+ GR_DONT_AUDIT,
83402+ /* used for non-audit messages that we shouldn't kill the task on */
83403+ GR_DONT_AUDIT_GOOD
83404+};
83405+
83406+enum {
83407+ GR_TTYSNIFF,
83408+ GR_RBAC,
83409+ GR_RBAC_STR,
83410+ GR_STR_RBAC,
83411+ GR_RBAC_MODE2,
83412+ GR_RBAC_MODE3,
83413+ GR_FILENAME,
83414+ GR_SYSCTL_HIDDEN,
83415+ GR_NOARGS,
83416+ GR_ONE_INT,
83417+ GR_ONE_INT_TWO_STR,
83418+ GR_ONE_STR,
83419+ GR_STR_INT,
83420+ GR_TWO_STR_INT,
83421+ GR_TWO_INT,
83422+ GR_TWO_U64,
83423+ GR_THREE_INT,
83424+ GR_FIVE_INT_TWO_STR,
83425+ GR_TWO_STR,
83426+ GR_THREE_STR,
83427+ GR_FOUR_STR,
83428+ GR_STR_FILENAME,
83429+ GR_FILENAME_STR,
83430+ GR_FILENAME_TWO_INT,
83431+ GR_FILENAME_TWO_INT_STR,
83432+ GR_TEXTREL,
83433+ GR_PTRACE,
83434+ GR_RESOURCE,
83435+ GR_CAP,
83436+ GR_SIG,
83437+ GR_SIG2,
83438+ GR_CRASH1,
83439+ GR_CRASH2,
83440+ GR_PSACCT,
83441+ GR_RWXMAP,
83442+ GR_RWXMAPVMA
83443+};
83444+
83445+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
83446+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
83447+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
83448+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
83449+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
83450+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
83451+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
83452+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
83453+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
83454+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
83455+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
83456+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
83457+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
83458+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
83459+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
83460+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
83461+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
83462+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
83463+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
83464+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
83465+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
83466+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
83467+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
83468+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
83469+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
83470+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
83471+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
83472+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
83473+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
83474+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
83475+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
83476+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
83477+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
83478+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
83479+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
83480+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
83481+
83482+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
83483+
83484+#endif
83485+
83486+#endif
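
All of the gr_log_* wrappers above funnel into the single varargs logger gr_log_varargs(), with the argtypes enum telling it how to pull arguments off the va_list: everything from a bare message (GR_NOARGS) to the nine-integer process-accounting record (GR_PSACCT). A minimal user-space model of that dispatch, with just two enum cases filled in and the acting-task suffix reduced to a placeholder:

#include <stdarg.h>
#include <stdio.h>

enum { MODEL_NOARGS, MODEL_ONE_INT, MODEL_TWO_STR };

static void log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	printf("grsec: ");
	switch (argtypes) {
	case MODEL_NOARGS:
		printf("%s", msg);
		break;
	case MODEL_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case MODEL_TWO_STR: {
		/* pull both strings before formatting so their order is fixed */
		const char *s1 = va_arg(ap, const char *);
		const char *s2 = va_arg(ap, const char *);
		printf(msg, s1, s2);
		break;
	}
	}
	printf("<task info>\n");   /* the real logger appends the acting task here */
	va_end(ap);
}

int main(void)
{
	log_varargs("time set by ", MODEL_NOARGS);
	log_varargs("invalid mode %d by ", MODEL_ONE_INT, 42);
	log_varargs("mount of %s to %s by ", MODEL_TWO_STR, "/dev/sda1", "/mnt");
	return 0;
}
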
83487diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
83488new file mode 100644
83489index 0000000..b02ba9d
83490--- /dev/null
83491+++ b/include/linux/grmsg.h
83492@@ -0,0 +1,117 @@
83493+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
83494+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
83495+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
83496+#define GR_STOPMOD_MSG "denied modification of module state by "
83497+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
83498+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
83499+#define GR_IOPERM_MSG "denied use of ioperm() by "
83500+#define GR_IOPL_MSG "denied use of iopl() by "
83501+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
83502+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
83503+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
83504+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
83505+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
83506+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
83507+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
83508+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
83509+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
83510+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
83511+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
83512+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
83513+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
83514+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
83515+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
83516+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
83517+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
83518+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
83519+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
83520+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
83521+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
83522+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
83523+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
83524+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
83525+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
83526+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
83527+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
83528+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
83529+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
83530+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
83531+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
83532+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
83533+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
83534+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
83535+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
83536+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
83537+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
83538+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
83539+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
83540+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
83541+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
83542+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
83543+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
83544+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
83545+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
83546+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
83547+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
83548+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
83549+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
83550+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
83551+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
83552+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
83553+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
83554+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
83555+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
83556+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
83557+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
83558+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
83559+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
83560+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
83561+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
83562+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
83563+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
83564+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
83565+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
83566+#define GR_FAILFORK_MSG "failed fork with errno %s by "
83567+#define GR_NICE_CHROOT_MSG "denied priority change by "
83568+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
83569+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
83570+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
83571+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
83572+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
83573+#define GR_TIME_MSG "time set by "
83574+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
83575+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
83576+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
83577+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
83578+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
83579+#define GR_BIND_MSG "denied bind() by "
83580+#define GR_CONNECT_MSG "denied connect() by "
83581+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
83582+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
83583+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
83584+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
83585+#define GR_CAP_ACL_MSG "use of %s denied for "
83586+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
83587+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
83588+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
83589+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
83590+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
83591+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
83592+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
83593+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
83594+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
83595+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
83596+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
83597+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
83598+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
83599+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
83600+#define GR_VM86_MSG "denied use of vm86 by "
83601+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
83602+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
83603+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
83604+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
83605+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
83606+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
83607+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
83608+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
83609+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
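
These are bare printf-format fragments; composite messages are assembled by C's compile-time concatenation of adjacent string literals, as in GR_DUALSIGLOG_MSG above, which splices DEFAULTSECMSG into the middle of the literal. A tiny demonstration with a truncated stand-in for DEFAULTSECMSG:

#include <stdio.h>

#define SECMSG_MODEL "%.256s[%.16s:%d]"      /* truncated model, not the real macro */
#define DUALSIGLOG_MODEL "signal %d sent to " SECMSG_MODEL " by %s\n"

int main(void)
{
	/* One format string, five conversions, after concatenation. */
	printf(DUALSIGLOG_MODEL, 9, "/usr/bin/crashy", "crashy", 1234, "/sbin/init");
	return 0;
}
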
83610diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
83611new file mode 100644
83612index 0000000..10b9635
83613--- /dev/null
83614+++ b/include/linux/grsecurity.h
83615@@ -0,0 +1,254 @@
83616+#ifndef GR_SECURITY_H
83617+#define GR_SECURITY_H
83618+#include <linux/fs.h>
83619+#include <linux/fs_struct.h>
83620+#include <linux/binfmts.h>
83621+#include <linux/gracl.h>
83622+
83623+/* notify of brain-dead configs */
83624+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83625+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
83626+#endif
83627+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83628+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
83629+#endif
83630+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
83631+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
83632+#endif
83633+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
83634+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
83635+#endif
83636+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
83637+#error "CONFIG_PAX enabled, but no PaX options are enabled."
83638+#endif
83639+
83640+int gr_handle_new_usb(void);
83641+
83642+void gr_handle_brute_attach(int dumpable);
83643+void gr_handle_brute_check(void);
83644+void gr_handle_kernel_exploit(void);
83645+
83646+char gr_roletype_to_char(void);
83647+
83648+int gr_proc_is_restricted(void);
83649+
83650+int gr_acl_enable_at_secure(void);
83651+
83652+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
83653+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
83654+
83655+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
83656+
83657+void gr_del_task_from_ip_table(struct task_struct *p);
83658+
83659+int gr_pid_is_chrooted(struct task_struct *p);
83660+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
83661+int gr_handle_chroot_nice(void);
83662+int gr_handle_chroot_sysctl(const int op);
83663+int gr_handle_chroot_setpriority(struct task_struct *p,
83664+ const int niceval);
83665+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
83666+int gr_chroot_fhandle(void);
83667+int gr_handle_chroot_chroot(const struct dentry *dentry,
83668+ const struct vfsmount *mnt);
83669+void gr_handle_chroot_chdir(const struct path *path);
83670+int gr_handle_chroot_chmod(const struct dentry *dentry,
83671+ const struct vfsmount *mnt, const int mode);
83672+int gr_handle_chroot_mknod(const struct dentry *dentry,
83673+ const struct vfsmount *mnt, const int mode);
83674+int gr_handle_chroot_mount(const struct dentry *dentry,
83675+ const struct vfsmount *mnt,
83676+ const char *dev_name);
83677+int gr_handle_chroot_pivot(void);
83678+int gr_handle_chroot_unix(const pid_t pid);
83679+
83680+int gr_handle_rawio(const struct inode *inode);
83681+
83682+void gr_handle_ioperm(void);
83683+void gr_handle_iopl(void);
83684+void gr_handle_msr_write(void);
83685+
83686+umode_t gr_acl_umask(void);
83687+
83688+int gr_tpe_allow(const struct file *file);
83689+
83690+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
83691+void gr_clear_chroot_entries(struct task_struct *task);
83692+
83693+void gr_log_forkfail(const int retval);
83694+void gr_log_timechange(void);
83695+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
83696+void gr_log_chdir(const struct dentry *dentry,
83697+ const struct vfsmount *mnt);
83698+void gr_log_chroot_exec(const struct dentry *dentry,
83699+ const struct vfsmount *mnt);
83700+void gr_log_remount(const char *devname, const int retval);
83701+void gr_log_unmount(const char *devname, const int retval);
83702+void gr_log_mount(const char *from, const char *to, const int retval);
83703+void gr_log_textrel(struct vm_area_struct *vma);
83704+void gr_log_ptgnustack(struct file *file);
83705+void gr_log_rwxmmap(struct file *file);
83706+void gr_log_rwxmprotect(struct vm_area_struct *vma);
83707+
83708+int gr_handle_follow_link(const struct inode *parent,
83709+ const struct inode *inode,
83710+ const struct dentry *dentry,
83711+ const struct vfsmount *mnt);
83712+int gr_handle_fifo(const struct dentry *dentry,
83713+ const struct vfsmount *mnt,
83714+ const struct dentry *dir, const int flag,
83715+ const int acc_mode);
83716+int gr_handle_hardlink(const struct dentry *dentry,
83717+ const struct vfsmount *mnt,
83718+ struct inode *inode,
83719+ const int mode, const struct filename *to);
83720+
83721+int gr_is_capable(const int cap);
83722+int gr_is_capable_nolog(const int cap);
83723+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
83724+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
83725+
83726+void gr_copy_label(struct task_struct *tsk);
83727+void gr_handle_crash(struct task_struct *task, const int sig);
83728+int gr_handle_signal(const struct task_struct *p, const int sig);
83729+int gr_check_crash_uid(const kuid_t uid);
83730+int gr_check_protected_task(const struct task_struct *task);
83731+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
83732+int gr_acl_handle_mmap(const struct file *file,
83733+ const unsigned long prot);
83734+int gr_acl_handle_mprotect(const struct file *file,
83735+ const unsigned long prot);
83736+int gr_check_hidden_task(const struct task_struct *tsk);
83737+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
83738+ const struct vfsmount *mnt);
83739+__u32 gr_acl_handle_utime(const struct dentry *dentry,
83740+ const struct vfsmount *mnt);
83741+__u32 gr_acl_handle_access(const struct dentry *dentry,
83742+ const struct vfsmount *mnt, const int fmode);
83743+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
83744+ const struct vfsmount *mnt, umode_t *mode);
83745+__u32 gr_acl_handle_chown(const struct dentry *dentry,
83746+ const struct vfsmount *mnt);
83747+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
83748+ const struct vfsmount *mnt);
83749+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
83750+ const struct vfsmount *mnt);
83751+int gr_handle_ptrace(struct task_struct *task, const long request);
83752+int gr_handle_proc_ptrace(struct task_struct *task);
83753+__u32 gr_acl_handle_execve(const struct dentry *dentry,
83754+ const struct vfsmount *mnt);
83755+int gr_check_crash_exec(const struct file *filp);
83756+int gr_acl_is_enabled(void);
83757+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
83758+ const kgid_t gid);
83759+int gr_set_proc_label(const struct dentry *dentry,
83760+ const struct vfsmount *mnt,
83761+ const int unsafe_flags);
83762+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
83763+ const struct vfsmount *mnt);
83764+__u32 gr_acl_handle_open(const struct dentry *dentry,
83765+ const struct vfsmount *mnt, int acc_mode);
83766+__u32 gr_acl_handle_creat(const struct dentry *dentry,
83767+ const struct dentry *p_dentry,
83768+ const struct vfsmount *p_mnt,
83769+ int open_flags, int acc_mode, const int imode);
83770+void gr_handle_create(const struct dentry *dentry,
83771+ const struct vfsmount *mnt);
83772+void gr_handle_proc_create(const struct dentry *dentry,
83773+ const struct inode *inode);
83774+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
83775+ const struct dentry *parent_dentry,
83776+ const struct vfsmount *parent_mnt,
83777+ const int mode);
83778+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
83779+ const struct dentry *parent_dentry,
83780+ const struct vfsmount *parent_mnt);
83781+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
83782+ const struct vfsmount *mnt);
83783+void gr_handle_delete(const ino_t ino, const dev_t dev);
83784+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
83785+ const struct vfsmount *mnt);
83786+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
83787+ const struct dentry *parent_dentry,
83788+ const struct vfsmount *parent_mnt,
83789+ const struct filename *from);
83790+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
83791+ const struct dentry *parent_dentry,
83792+ const struct vfsmount *parent_mnt,
83793+ const struct dentry *old_dentry,
83794+ const struct vfsmount *old_mnt, const struct filename *to);
83795+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
83796+int gr_acl_handle_rename(struct dentry *new_dentry,
83797+ struct dentry *parent_dentry,
83798+ const struct vfsmount *parent_mnt,
83799+ struct dentry *old_dentry,
83800+ struct inode *old_parent_inode,
83801+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
83802+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
83803+ struct dentry *old_dentry,
83804+ struct dentry *new_dentry,
83805+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
83806+__u32 gr_check_link(const struct dentry *new_dentry,
83807+ const struct dentry *parent_dentry,
83808+ const struct vfsmount *parent_mnt,
83809+ const struct dentry *old_dentry,
83810+ const struct vfsmount *old_mnt);
83811+int gr_acl_handle_filldir(const struct file *file, const char *name,
83812+ const unsigned int namelen, const ino_t ino);
83813+
83814+__u32 gr_acl_handle_unix(const struct dentry *dentry,
83815+ const struct vfsmount *mnt);
83816+void gr_acl_handle_exit(void);
83817+void gr_acl_handle_psacct(struct task_struct *task, const long code);
83818+int gr_acl_handle_procpidmem(const struct task_struct *task);
83819+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
83820+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
83821+void gr_audit_ptrace(struct task_struct *task);
83822+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
83823+void gr_put_exec_file(struct task_struct *task);
83824+
83825+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
83826+
83827+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83828+extern void gr_learn_resource(const struct task_struct *task, const int res,
83829+ const unsigned long wanted, const int gt);
83830+#else
83831+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83832+ const unsigned long wanted, const int gt)
83833+{
83834+}
83835+#endif
83836+
83837+#ifdef CONFIG_GRKERNSEC_RESLOG
83838+extern void gr_log_resource(const struct task_struct *task, const int res,
83839+ const unsigned long wanted, const int gt);
83840+#else
83841+static inline void gr_log_resource(const struct task_struct *task, const int res,
83842+ const unsigned long wanted, const int gt)
83843+{
83844+}
83845+#endif
83846+
83847+#ifdef CONFIG_GRKERNSEC
83848+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
83849+void gr_handle_vm86(void);
83850+void gr_handle_mem_readwrite(u64 from, u64 to);
83851+
83852+void gr_log_badprocpid(const char *entry);
83853+
83854+extern int grsec_enable_dmesg;
83855+extern int grsec_disable_privio;
83856+
83857+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83858+extern kgid_t grsec_proc_gid;
83859+#endif
83860+
83861+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83862+extern int grsec_enable_chroot_findtask;
83863+#endif
83864+#ifdef CONFIG_GRKERNSEC_SETXID
83865+extern int grsec_enable_setxid;
83866+#endif
83867+#endif
83868+
83869+#endif
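
The tail of this header (gr_learn_resource()/gr_log_resource()) shows the pattern used for every config-dependent hook: a real extern when the feature is compiled in, an empty static inline otherwise, so call sites never carry their own #ifdefs and a disabled hook vanishes after inlining. The same shape in miniature, with a hypothetical config symbol:

#ifdef CONFIG_SOME_FEATURE                       /* hypothetical, for illustration */
extern void feature_hook(int arg);
#else
static inline void feature_hook(int arg) { }     /* compiles away entirely */
#endif
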
83870diff --git a/include/linux/grsock.h b/include/linux/grsock.h
83871new file mode 100644
83872index 0000000..e7ffaaf
83873--- /dev/null
83874+++ b/include/linux/grsock.h
83875@@ -0,0 +1,19 @@
83876+#ifndef __GRSOCK_H
83877+#define __GRSOCK_H
83878+
83879+extern void gr_attach_curr_ip(const struct sock *sk);
83880+extern int gr_handle_sock_all(const int family, const int type,
83881+ const int protocol);
83882+extern int gr_handle_sock_server(const struct sockaddr *sck);
83883+extern int gr_handle_sock_server_other(const struct sock *sck);
83884+extern int gr_handle_sock_client(const struct sockaddr *sck);
83885+extern int gr_search_connect(struct socket * sock,
83886+ struct sockaddr_in * addr);
83887+extern int gr_search_bind(struct socket * sock,
83888+ struct sockaddr_in * addr);
83889+extern int gr_search_listen(struct socket * sock);
83890+extern int gr_search_accept(struct socket * sock);
83891+extern int gr_search_socket(const int domain, const int type,
83892+ const int protocol);
83893+
83894+#endif
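
gr_search_socket() and its siblings return nonzero to allow, and the syscall paths translate a denial into an error return. A user-space model of that caller-side contract (the hook body is a stub; the real checks live in the patch's grsecurity socket code, not in this excerpt):

#include <errno.h>
#include <stdio.h>

/* Stub standing in for the real policy check. */
static int gr_search_socket_model(int domain, int type, int protocol)
{
	(void)domain; (void)type; (void)protocol;
	return 1;                     /* allow everything in this model */
}

static long socket_syscall_model(int domain, int type, int protocol)
{
	if (!gr_search_socket_model(domain, type, protocol))
		return -EACCES;       /* denial becomes an error return */
	/* ... normal socket creation would continue here ... */
	return 3;                     /* pretend file descriptor */
}

int main(void)
{
	printf("socket() -> %ld\n", socket_syscall_model(2, 1, 0));
	return 0;
}
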
83895diff --git a/include/linux/hash.h b/include/linux/hash.h
83896index bd1754c..8240892 100644
83897--- a/include/linux/hash.h
83898+++ b/include/linux/hash.h
83899@@ -83,7 +83,7 @@ static inline u32 hash32_ptr(const void *ptr)
83900 struct fast_hash_ops {
83901 u32 (*hash)(const void *data, u32 len, u32 seed);
83902 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
83903-};
83904+} __no_const;
83905
83906 /**
83907 * arch_fast_hash - Calculates a hash over a given buffer that can have
83908diff --git a/include/linux/highmem.h b/include/linux/highmem.h
83909index 7fb31da..08b5114 100644
83910--- a/include/linux/highmem.h
83911+++ b/include/linux/highmem.h
83912@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
83913 kunmap_atomic(kaddr);
83914 }
83915
83916+static inline void sanitize_highpage(struct page *page)
83917+{
83918+ void *kaddr;
83919+ unsigned long flags;
83920+
83921+ local_irq_save(flags);
83922+ kaddr = kmap_atomic(page);
83923+ clear_page(kaddr);
83924+ kunmap_atomic(kaddr);
83925+ local_irq_restore(flags);
83926+}
83927+
83928 static inline void zero_user_segments(struct page *page,
83929 unsigned start1, unsigned end1,
83930 unsigned start2, unsigned end2)
83931diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
83932index 1c7b89a..7dda400 100644
83933--- a/include/linux/hwmon-sysfs.h
83934+++ b/include/linux/hwmon-sysfs.h
83935@@ -25,7 +25,8 @@
83936 struct sensor_device_attribute{
83937 struct device_attribute dev_attr;
83938 int index;
83939-};
83940+} __do_const;
83941+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
83942 #define to_sensor_dev_attr(_dev_attr) \
83943 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
83944
83945@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
83946 struct device_attribute dev_attr;
83947 u8 index;
83948 u8 nr;
83949-};
83950+} __do_const;
83951+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
83952 #define to_sensor_dev_attr_2(_dev_attr) \
83953 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
83954
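
This hunk is the first of many in this header region showing the constify pattern: a struct whose instances are essentially write-once gets __do_const, which the accompanying gcc plugin uses to force instances into read-only memory, while the companion _no_const typedef opts back out for the few objects that must be assembled at runtime (typically in probe paths). A plain-C model of the intent, with the plugin's attributes replaced by ordinary const:

/* Model only: __do_const approximates "every instance is const";
 * the _no_const typedef restores plain mutability for exceptions. */
struct sensor_attr_model {
	const char *name;
	int index;
};

static const struct sensor_attr_model fixed_attr = { "temp1_input", 0 }; /* .rodata */
static struct sensor_attr_model runtime_attr;        /* the _no_const case */

void probe_model(int channel)
{
	runtime_attr.name = "dynamic";
	runtime_attr.index = channel;   /* legal only on the non-const instance */
}
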
83955diff --git a/include/linux/i2c.h b/include/linux/i2c.h
83956index b556e0a..c10a515 100644
83957--- a/include/linux/i2c.h
83958+++ b/include/linux/i2c.h
83959@@ -378,6 +378,7 @@ struct i2c_algorithm {
83960 /* To determine what the adapter supports */
83961 u32 (*functionality) (struct i2c_adapter *);
83962 };
83963+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
83964
83965 /**
83966 * struct i2c_bus_recovery_info - I2C bus recovery information
83967diff --git a/include/linux/i2o.h b/include/linux/i2o.h
83968index d23c3c2..eb63c81 100644
83969--- a/include/linux/i2o.h
83970+++ b/include/linux/i2o.h
83971@@ -565,7 +565,7 @@ struct i2o_controller {
83972 struct i2o_device *exec; /* Executive */
83973 #if BITS_PER_LONG == 64
83974 spinlock_t context_list_lock; /* lock for context_list */
83975- atomic_t context_list_counter; /* needed for unique contexts */
83976+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
83977 struct list_head context_list; /* list of context id's
83978 and pointers */
83979 #endif
83980diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
83981index aff7ad8..3942bbd 100644
83982--- a/include/linux/if_pppox.h
83983+++ b/include/linux/if_pppox.h
83984@@ -76,7 +76,7 @@ struct pppox_proto {
83985 int (*ioctl)(struct socket *sock, unsigned int cmd,
83986 unsigned long arg);
83987 struct module *owner;
83988-};
83989+} __do_const;
83990
83991 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
83992 extern void unregister_pppox_proto(int proto_num);
83993diff --git a/include/linux/init.h b/include/linux/init.h
83994index 2df8e8d..3e1280d 100644
83995--- a/include/linux/init.h
83996+++ b/include/linux/init.h
83997@@ -37,9 +37,17 @@
83998 * section.
83999 */
84000
84001+#define add_init_latent_entropy __latent_entropy
84002+
84003+#ifdef CONFIG_MEMORY_HOTPLUG
84004+#define add_meminit_latent_entropy
84005+#else
84006+#define add_meminit_latent_entropy __latent_entropy
84007+#endif
84008+
84009 /* These are for everybody (although not all archs will actually
84010 discard it in modules) */
84011-#define __init __section(.init.text) __cold notrace
84012+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
84013 #define __initdata __section(.init.data)
84014 #define __initconst __constsection(.init.rodata)
84015 #define __exitdata __section(.exit.data)
84016@@ -100,7 +108,7 @@
84017 #define __cpuexitconst
84018
84019 /* Used for MEMORY_HOTPLUG */
84020-#define __meminit __section(.meminit.text) __cold notrace
84021+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
84022 #define __meminitdata __section(.meminit.data)
84023 #define __meminitconst __constsection(.meminit.rodata)
84024 #define __memexit __section(.memexit.text) __exitused __cold notrace
84025diff --git a/include/linux/init_task.h b/include/linux/init_task.h
84026index 6df7f9f..d0bf699 100644
84027--- a/include/linux/init_task.h
84028+++ b/include/linux/init_task.h
84029@@ -156,6 +156,12 @@ extern struct task_group root_task_group;
84030
84031 #define INIT_TASK_COMM "swapper"
84032
84033+#ifdef CONFIG_X86
84034+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
84035+#else
84036+#define INIT_TASK_THREAD_INFO
84037+#endif
84038+
84039 #ifdef CONFIG_RT_MUTEXES
84040 # define INIT_RT_MUTEXES(tsk) \
84041 .pi_waiters = RB_ROOT, \
84042@@ -203,6 +209,7 @@ extern struct task_group root_task_group;
84043 RCU_POINTER_INITIALIZER(cred, &init_cred), \
84044 .comm = INIT_TASK_COMM, \
84045 .thread = INIT_THREAD, \
84046+ INIT_TASK_THREAD_INFO \
84047 .fs = &init_fs, \
84048 .files = &init_files, \
84049 .signal = &init_signals, \
84050diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
84051index 698ad05..8601bb7 100644
84052--- a/include/linux/interrupt.h
84053+++ b/include/linux/interrupt.h
84054@@ -418,8 +418,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
84055
84056 struct softirq_action
84057 {
84058- void (*action)(struct softirq_action *);
84059-};
84060+ void (*action)(void);
84061+} __no_const;
84062
84063 asmlinkage void do_softirq(void);
84064 asmlinkage void __do_softirq(void);
84065@@ -433,7 +433,7 @@ static inline void do_softirq_own_stack(void)
84066 }
84067 #endif
84068
84069-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
84070+extern void open_softirq(int nr, void (*action)(void));
84071 extern void softirq_init(void);
84072 extern void __raise_softirq_irqoff(unsigned int nr);
84073
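
This hunk drops the softirq_action argument from every handler: the pointer was almost never consulted, and removing it lets the action become a plain void (*)(void) inside a __no_const struct. What the change looks like at a handler, sketched with a real handler name (net_rx_action) and elided bodies:

#ifdef BEFORE_THIS_PATCH
static void net_rx_action(struct softirq_action *h)   /* old signature */
{
	/* handler body; h was effectively unused */
}
#else
static void net_rx_action(void)                       /* new signature */
{
	/* same body, minus the dead parameter */
}
#endif

static int __init net_dev_init_fragment(void)
{
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);  /* registration call unchanged */
	return 0;
}
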
84074diff --git a/include/linux/iommu.h b/include/linux/iommu.h
84075index b96a5b2..2732d1c 100644
84076--- a/include/linux/iommu.h
84077+++ b/include/linux/iommu.h
84078@@ -131,7 +131,7 @@ struct iommu_ops {
84079 u32 (*domain_get_windows)(struct iommu_domain *domain);
84080
84081 unsigned long pgsize_bitmap;
84082-};
84083+} __do_const;
84084
84085 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
84086 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
84087diff --git a/include/linux/ioport.h b/include/linux/ioport.h
84088index 5e3a906..3131d0f 100644
84089--- a/include/linux/ioport.h
84090+++ b/include/linux/ioport.h
84091@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
84092 int adjust_resource(struct resource *res, resource_size_t start,
84093 resource_size_t size);
84094 resource_size_t resource_alignment(struct resource *res);
84095-static inline resource_size_t resource_size(const struct resource *res)
84096+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
84097 {
84098 return res->end - res->start + 1;
84099 }
84100diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
84101index 35e7eca..6afb7ad 100644
84102--- a/include/linux/ipc_namespace.h
84103+++ b/include/linux/ipc_namespace.h
84104@@ -69,7 +69,7 @@ struct ipc_namespace {
84105 struct user_namespace *user_ns;
84106
84107 unsigned int proc_inum;
84108-};
84109+} __randomize_layout;
84110
84111 extern struct ipc_namespace init_ipc_ns;
84112 extern atomic_t nr_ipc_ns;
84113diff --git a/include/linux/irq.h b/include/linux/irq.h
84114index 0d998d8..3a1c782 100644
84115--- a/include/linux/irq.h
84116+++ b/include/linux/irq.h
84117@@ -344,7 +344,8 @@ struct irq_chip {
84118 void (*irq_release_resources)(struct irq_data *data);
84119
84120 unsigned long flags;
84121-};
84122+} __do_const;
84123+typedef struct irq_chip __no_const irq_chip_no_const;
84124
84125 /*
84126 * irq_chip specific flags
84127diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
84128index 45e2d8c..26d85da 100644
84129--- a/include/linux/irqchip/arm-gic.h
84130+++ b/include/linux/irqchip/arm-gic.h
84131@@ -75,9 +75,11 @@
84132
84133 #ifndef __ASSEMBLY__
84134
84135+#include <linux/irq.h>
84136+
84137 struct device_node;
84138
84139-extern struct irq_chip gic_arch_extn;
84140+extern irq_chip_no_const gic_arch_extn;
84141
84142 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
84143 u32 offset, struct device_node *);
84144diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
84145index 8e10f57..d5f62bc 100644
84146--- a/include/linux/isdn_ppp.h
84147+++ b/include/linux/isdn_ppp.h
84148@@ -180,8 +180,9 @@ struct ippp_struct {
84149 struct slcompress *slcomp;
84150 #endif
84151 #ifdef CONFIG_IPPP_FILTER
84152- struct sk_filter *pass_filter; /* filter for packets to pass */
84153- struct sk_filter *active_filter; /* filter for pkts to reset idle */
84154+ struct sock_filter *pass_filter; /* filter for packets to pass */
84155+ struct sock_filter *active_filter; /* filter for pkts to reset idle */
84156+ unsigned pass_len, active_len;
84157 #endif
84158 unsigned long debug;
84159 struct isdn_ppp_compressor *compressor,*decompressor;
84160diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
84161index 1f44466..b481806 100644
84162--- a/include/linux/jiffies.h
84163+++ b/include/linux/jiffies.h
84164@@ -292,20 +292,20 @@ extern unsigned long preset_lpj;
84165 /*
84166 * Convert various time units to each other:
84167 */
84168-extern unsigned int jiffies_to_msecs(const unsigned long j);
84169-extern unsigned int jiffies_to_usecs(const unsigned long j);
84170+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
84171+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
84172
84173-static inline u64 jiffies_to_nsecs(const unsigned long j)
84174+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
84175 {
84176 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
84177 }
84178
84179-extern unsigned long msecs_to_jiffies(const unsigned int m);
84180-extern unsigned long usecs_to_jiffies(const unsigned int u);
84181+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
84182+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
84183 extern unsigned long timespec_to_jiffies(const struct timespec *value);
84184 extern void jiffies_to_timespec(const unsigned long jiffies,
84185- struct timespec *value);
84186-extern unsigned long timeval_to_jiffies(const struct timeval *value);
84187+ struct timespec *value) __intentional_overflow(-1);
84188+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
84189 extern void jiffies_to_timeval(const unsigned long jiffies,
84190 struct timeval *value);
84191
84192diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
84193index 6883e19..e854fcb 100644
84194--- a/include/linux/kallsyms.h
84195+++ b/include/linux/kallsyms.h
84196@@ -15,7 +15,8 @@
84197
84198 struct module;
84199
84200-#ifdef CONFIG_KALLSYMS
84201+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
84202+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
84203 /* Lookup the address for a symbol. Returns 0 if not found. */
84204 unsigned long kallsyms_lookup_name(const char *name);
84205
84206@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
84207 /* Stupid that this does nothing, but I didn't create this mess. */
84208 #define __print_symbol(fmt, addr)
84209 #endif /*CONFIG_KALLSYMS*/
84210+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
84211+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
84212+extern unsigned long kallsyms_lookup_name(const char *name);
84213+extern void __print_symbol(const char *fmt, unsigned long address);
84214+extern int sprint_backtrace(char *buffer, unsigned long address);
84215+extern int sprint_symbol(char *buffer, unsigned long address);
84216+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
84217+const char *kallsyms_lookup(unsigned long addr,
84218+ unsigned long *symbolsize,
84219+ unsigned long *offset,
84220+ char **modname, char *namebuf);
84221+extern int kallsyms_lookup_size_offset(unsigned long addr,
84222+ unsigned long *symbolsize,
84223+ unsigned long *offset);
84224+#endif
84225
84226 /* This macro allows us to keep printk typechecking */
84227 static __printf(1, 2)
84228diff --git a/include/linux/key-type.h b/include/linux/key-type.h
84229index a74c3a8..28d3f21 100644
84230--- a/include/linux/key-type.h
84231+++ b/include/linux/key-type.h
84232@@ -131,7 +131,7 @@ struct key_type {
84233 /* internal fields */
84234 struct list_head link; /* link in types list */
84235 struct lock_class_key lock_class; /* key->sem lock class */
84236-};
84237+} __do_const;
84238
84239 extern struct key_type key_type_keyring;
84240
84241diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
84242index 6b06d37..c134867 100644
84243--- a/include/linux/kgdb.h
84244+++ b/include/linux/kgdb.h
84245@@ -52,7 +52,7 @@ extern int kgdb_connected;
84246 extern int kgdb_io_module_registered;
84247
84248 extern atomic_t kgdb_setting_breakpoint;
84249-extern atomic_t kgdb_cpu_doing_single_step;
84250+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
84251
84252 extern struct task_struct *kgdb_usethread;
84253 extern struct task_struct *kgdb_contthread;
84254@@ -254,7 +254,7 @@ struct kgdb_arch {
84255 void (*correct_hw_break)(void);
84256
84257 void (*enable_nmi)(bool on);
84258-};
84259+} __do_const;
84260
84261 /**
84262 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
84263@@ -279,7 +279,7 @@ struct kgdb_io {
84264 void (*pre_exception) (void);
84265 void (*post_exception) (void);
84266 int is_console;
84267-};
84268+} __do_const;
84269
84270 extern struct kgdb_arch arch_kgdb_ops;
84271
84272diff --git a/include/linux/kmod.h b/include/linux/kmod.h
84273index 0555cc6..40116ce 100644
84274--- a/include/linux/kmod.h
84275+++ b/include/linux/kmod.h
84276@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
84277 * usually useless though. */
84278 extern __printf(2, 3)
84279 int __request_module(bool wait, const char *name, ...);
84280+extern __printf(3, 4)
84281+int ___request_module(bool wait, char *param_name, const char *name, ...);
84282 #define request_module(mod...) __request_module(true, mod)
84283 #define request_module_nowait(mod...) __request_module(false, mod)
84284 #define try_then_request_module(x, mod...) \
84285@@ -57,6 +59,9 @@ struct subprocess_info {
84286 struct work_struct work;
84287 struct completion *complete;
84288 char *path;
84289+#ifdef CONFIG_GRKERNSEC
84290+ char *origpath;
84291+#endif
84292 char **argv;
84293 char **envp;
84294 int wait;
84295diff --git a/include/linux/kobject.h b/include/linux/kobject.h
84296index 2d61b90..a1d0a13 100644
84297--- a/include/linux/kobject.h
84298+++ b/include/linux/kobject.h
84299@@ -118,7 +118,7 @@ struct kobj_type {
84300 struct attribute **default_attrs;
84301 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
84302 const void *(*namespace)(struct kobject *kobj);
84303-};
84304+} __do_const;
84305
84306 struct kobj_uevent_env {
84307 char *argv[3];
84308@@ -142,6 +142,7 @@ struct kobj_attribute {
84309 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
84310 const char *buf, size_t count);
84311 };
84312+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
84313
84314 extern const struct sysfs_ops kobj_sysfs_ops;
84315
84316@@ -169,7 +170,7 @@ struct kset {
84317 spinlock_t list_lock;
84318 struct kobject kobj;
84319 const struct kset_uevent_ops *uevent_ops;
84320-};
84321+} __randomize_layout;
84322
84323 extern void kset_init(struct kset *kset);
84324 extern int __must_check kset_register(struct kset *kset);
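
The kobj_attribute_no_const typedef above is the escape hatch that pairs with __do_const: the constify plugin makes the whole structure type const, so the few call sites that legitimately build such a structure at runtime declare it through a __no_const typedef instead. A sketch with the plugin attribute stubbed out for plain compilers; demo_attr and hello_show are invented names:

#include <stdio.h>
#include <string.h>

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))    /* plugin-defined attribute */
#else
#define __no_const                              /* no-op without the plugin */
#endif

struct demo_attr {
        const char *name;
        int (*show)(char *buf);
};
typedef struct demo_attr __no_const demo_attr_no_const;

static int hello_show(char *buf)
{
        strcpy(buf, "hello");
        return 5;
}

int main(void)
{
        /* Built up at runtime, so it must stay writable: with the
         * plugin active, only the __no_const typedef allows this. */
        demo_attr_no_const attr;
        char buf[16];

        attr.name = "demo";
        attr.show = hello_show;
        return attr.show(buf) == 5 ? 0 : 1;
}
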
84325diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
84326index df32d25..fb52e27 100644
84327--- a/include/linux/kobject_ns.h
84328+++ b/include/linux/kobject_ns.h
84329@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
84330 const void *(*netlink_ns)(struct sock *sk);
84331 const void *(*initial_ns)(void);
84332 void (*drop_ns)(void *);
84333-};
84334+} __do_const;
84335
84336 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
84337 int kobj_ns_type_registered(enum kobj_ns_type type);
84338diff --git a/include/linux/kref.h b/include/linux/kref.h
84339index 484604d..0f6c5b6 100644
84340--- a/include/linux/kref.h
84341+++ b/include/linux/kref.h
84342@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
84343 static inline int kref_sub(struct kref *kref, unsigned int count,
84344 void (*release)(struct kref *kref))
84345 {
84346- WARN_ON(release == NULL);
84347+ BUG_ON(release == NULL);
84348
84349 if (atomic_sub_and_test((int) count, &kref->refcount)) {
84350 release(kref);
84351diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
84352index ec4e3bd..14db03a 100644
84353--- a/include/linux/kvm_host.h
84354+++ b/include/linux/kvm_host.h
84355@@ -468,7 +468,7 @@ static inline void kvm_irqfd_exit(void)
84356 {
84357 }
84358 #endif
84359-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84360+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84361 struct module *module);
84362 void kvm_exit(void);
84363
84364@@ -634,7 +634,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
84365 struct kvm_guest_debug *dbg);
84366 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
84367
84368-int kvm_arch_init(void *opaque);
84369+int kvm_arch_init(const void *opaque);
84370 void kvm_arch_exit(void);
84371
84372 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
84373diff --git a/include/linux/libata.h b/include/linux/libata.h
84374index 92abb49..e7fff2a 100644
84375--- a/include/linux/libata.h
84376+++ b/include/linux/libata.h
84377@@ -976,7 +976,7 @@ struct ata_port_operations {
84378 * fields must be pointers.
84379 */
84380 const struct ata_port_operations *inherits;
84381-};
84382+} __do_const;
84383
84384 struct ata_port_info {
84385 unsigned long flags;
84386diff --git a/include/linux/linkage.h b/include/linux/linkage.h
84387index a6a42dd..6c5ebce 100644
84388--- a/include/linux/linkage.h
84389+++ b/include/linux/linkage.h
84390@@ -36,6 +36,7 @@
84391 #endif
84392
84393 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
84394+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
84395 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
84396
84397 /*
84398diff --git a/include/linux/list.h b/include/linux/list.h
84399index ef95941..82db65a 100644
84400--- a/include/linux/list.h
84401+++ b/include/linux/list.h
84402@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
84403 extern void list_del(struct list_head *entry);
84404 #endif
84405
84406+extern void __pax_list_add(struct list_head *new,
84407+ struct list_head *prev,
84408+ struct list_head *next);
84409+static inline void pax_list_add(struct list_head *new, struct list_head *head)
84410+{
84411+ __pax_list_add(new, head, head->next);
84412+}
84413+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
84414+{
84415+ __pax_list_add(new, head->prev, head);
84416+}
84417+extern void pax_list_del(struct list_head *entry);
84418+
84419 /**
84420 * list_replace - replace old entry by new one
84421 * @old : the element to be replaced
84422@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
84423 INIT_LIST_HEAD(entry);
84424 }
84425
84426+extern void pax_list_del_init(struct list_head *entry);
84427+
84428 /**
84429 * list_move - delete from one list and add as another's head
84430 * @list: the entry to move
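
pax_list_add()/pax_list_del() exist because KERNEXEC/constify can leave list nodes (for instance, ones embedded in constified ops structures) in read-only memory, where a plain list_add() would fault; the helpers, implemented later in this patch in lib/list_debug.c, wrap the pointer updates in a pax_open_kernel()/pax_close_kernel() write window. A userspace analogue of the pattern using mprotect(), purely illustrative (on x86 the kernel toggles CR0.WP rather than page protections):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct list_head { struct list_head *next, *prev; };

/* Stand-ins for pax_open_kernel()/pax_close_kernel(). */
static void open_window(void *page, size_t len)
{
        mprotect(page, len, PROT_READ | PROT_WRITE);
}

static void close_window(void *page, size_t len)
{
        mprotect(page, len, PROT_READ);
}

static void pax_list_add(struct list_head *new, struct list_head *head,
                         void *ro_page, size_t len)
{
        open_window(ro_page, len);
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
        close_window(ro_page, len);
}

int main(void)
{
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
        struct list_head *head = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct list_head node;

        if (head == MAP_FAILED)
                return 1;
        head->next = head->prev = head;
        mprotect(head, pagesz, PROT_READ);  /* list head is now "rodata" */

        pax_list_add(&node, head, head, pagesz);
        printf("head->next == &node: %d\n", head->next == &node);
        return 0;
}
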
84431diff --git a/include/linux/math64.h b/include/linux/math64.h
84432index c45c089..298841c 100644
84433--- a/include/linux/math64.h
84434+++ b/include/linux/math64.h
84435@@ -15,7 +15,7 @@
84436 * This is commonly provided by 32bit archs to provide an optimized 64bit
84437 * divide.
84438 */
84439-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84440+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84441 {
84442 *remainder = dividend % divisor;
84443 return dividend / divisor;
84444@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
84445 /**
84446 * div64_u64 - unsigned 64bit divide with 64bit divisor
84447 */
84448-static inline u64 div64_u64(u64 dividend, u64 divisor)
84449+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
84450 {
84451 return dividend / divisor;
84452 }
84453@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
84454 #define div64_ul(x, y) div_u64((x), (y))
84455
84456 #ifndef div_u64_rem
84457-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84458+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84459 {
84460 *remainder = do_div(dividend, divisor);
84461 return dividend;
84462@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
84463 #endif
84464
84465 #ifndef div64_u64
84466-extern u64 div64_u64(u64 dividend, u64 divisor);
84467+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
84468 #endif
84469
84470 #ifndef div64_s64
84471@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
84472 * divide.
84473 */
84474 #ifndef div_u64
84475-static inline u64 div_u64(u64 dividend, u32 divisor)
84476+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
84477 {
84478 u32 remainder;
84479 return div_u64_rem(dividend, divisor, &remainder);
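
__intentional_overflow(-1), sprinkled over the division helpers above, is an annotation for the size_overflow GCC plugin rather than a semantic change: it tells the plugin that any wraparound inside the marked function is deliberate, so no overflow trap is inserted there or propagated to callers. The C behaviour of, say, div_u64_rem() is untouched:

#include <stdint.h>
#include <stdio.h>

/* Plain-C behaviour of div_u64_rem(); the patch only changes how the
 * size_overflow plugin instruments it. */
static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor,
                            uint32_t *remainder)
{
        *remainder = (uint32_t)(dividend % divisor);
        return dividend / divisor;
}

int main(void)
{
        uint32_t rem;
        uint64_t q = div_u64_rem(1000000007ULL, 3, &rem);

        printf("1000000007 / 3 = %llu rem %u\n",
               (unsigned long long)q, rem);
        return 0;
}
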
84480diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
84481index f230a97..714c006 100644
84482--- a/include/linux/mempolicy.h
84483+++ b/include/linux/mempolicy.h
84484@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
84485 }
84486
84487 #define vma_policy(vma) ((vma)->vm_policy)
84488+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84489+{
84490+ vma->vm_policy = pol;
84491+}
84492
84493 static inline void mpol_get(struct mempolicy *pol)
84494 {
84495@@ -228,6 +232,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
84496 }
84497
84498 #define vma_policy(vma) NULL
84499+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84500+{
84501+}
84502
84503 static inline int
84504 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
84505diff --git a/include/linux/mm.h b/include/linux/mm.h
84506index e03dd29..eaf923c 100644
84507--- a/include/linux/mm.h
84508+++ b/include/linux/mm.h
84509@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
84510 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
84511 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
84512 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
84513+
84514+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
84515+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
84516+#endif
84517+
84518 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
84519
84520 #ifdef CONFIG_MEM_SOFT_DIRTY
84521@@ -237,8 +242,8 @@ struct vm_operations_struct {
84522 /* called by access_process_vm when get_user_pages() fails, typically
84523 * for use by special VMAs that can switch between memory and hardware
84524 */
84525- int (*access)(struct vm_area_struct *vma, unsigned long addr,
84526- void *buf, int len, int write);
84527+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
84528+ void *buf, size_t len, int write);
84529
84530 /* Called by the /proc/PID/maps code to ask the vma whether it
84531 * has a special name. Returning non-NULL will also cause this
84532@@ -274,6 +279,7 @@ struct vm_operations_struct {
84533 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
84534 unsigned long size, pgoff_t pgoff);
84535 };
84536+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
84537
84538 struct mmu_gather;
84539 struct inode;
84540@@ -1144,8 +1150,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
84541 unsigned long *pfn);
84542 int follow_phys(struct vm_area_struct *vma, unsigned long address,
84543 unsigned int flags, unsigned long *prot, resource_size_t *phys);
84544-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84545- void *buf, int len, int write);
84546+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84547+ void *buf, size_t len, int write);
84548
84549 static inline void unmap_shared_mapping_range(struct address_space *mapping,
84550 loff_t const holebegin, loff_t const holelen)
84551@@ -1184,9 +1190,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
84552 }
84553 #endif
84554
84555-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
84556-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84557- void *buf, int len, int write);
84558+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
84559+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84560+ void *buf, size_t len, int write);
84561
84562 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84563 unsigned long start, unsigned long nr_pages,
84564@@ -1219,34 +1225,6 @@ int set_page_dirty_lock(struct page *page);
84565 int clear_page_dirty_for_io(struct page *page);
84566 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
84567
84568-/* Is the vma a continuation of the stack vma above it? */
84569-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
84570-{
84571- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
84572-}
84573-
84574-static inline int stack_guard_page_start(struct vm_area_struct *vma,
84575- unsigned long addr)
84576-{
84577- return (vma->vm_flags & VM_GROWSDOWN) &&
84578- (vma->vm_start == addr) &&
84579- !vma_growsdown(vma->vm_prev, addr);
84580-}
84581-
84582-/* Is the vma a continuation of the stack vma below it? */
84583-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
84584-{
84585- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
84586-}
84587-
84588-static inline int stack_guard_page_end(struct vm_area_struct *vma,
84589- unsigned long addr)
84590-{
84591- return (vma->vm_flags & VM_GROWSUP) &&
84592- (vma->vm_end == addr) &&
84593- !vma_growsup(vma->vm_next, addr);
84594-}
84595-
84596 extern pid_t
84597 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
84598
84599@@ -1346,6 +1324,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
84600 }
84601 #endif
84602
84603+#ifdef CONFIG_MMU
84604+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
84605+#else
84606+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
84607+{
84608+ return __pgprot(0);
84609+}
84610+#endif
84611+
84612 int vma_wants_writenotify(struct vm_area_struct *vma);
84613
84614 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
84615@@ -1364,8 +1351,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
84616 {
84617 return 0;
84618 }
84619+
84620+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
84621+ unsigned long address)
84622+{
84623+ return 0;
84624+}
84625 #else
84626 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84627+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84628 #endif
84629
84630 #ifdef __PAGETABLE_PMD_FOLDED
84631@@ -1374,8 +1368,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
84632 {
84633 return 0;
84634 }
84635+
84636+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
84637+ unsigned long address)
84638+{
84639+ return 0;
84640+}
84641 #else
84642 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
84643+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
84644 #endif
84645
84646 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
84647@@ -1393,11 +1394,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
84648 NULL: pud_offset(pgd, address);
84649 }
84650
84651+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
84652+{
84653+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
84654+ NULL: pud_offset(pgd, address);
84655+}
84656+
84657 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
84658 {
84659 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
84660 NULL: pmd_offset(pud, address);
84661 }
84662+
84663+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
84664+{
84665+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
84666+ NULL: pmd_offset(pud, address);
84667+}
84668 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
84669
84670 #if USE_SPLIT_PTE_PTLOCKS
84671@@ -1796,7 +1809,7 @@ extern int install_special_mapping(struct mm_struct *mm,
84672 unsigned long addr, unsigned long len,
84673 unsigned long flags, struct page **pages);
84674
84675-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
84676+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
84677
84678 extern unsigned long mmap_region(struct file *file, unsigned long addr,
84679 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
84680@@ -1804,6 +1817,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
84681 unsigned long len, unsigned long prot, unsigned long flags,
84682 unsigned long pgoff, unsigned long *populate);
84683 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
84684+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
84685
84686 #ifdef CONFIG_MMU
84687 extern int __mm_populate(unsigned long addr, unsigned long len,
84688@@ -1832,10 +1846,11 @@ struct vm_unmapped_area_info {
84689 unsigned long high_limit;
84690 unsigned long align_mask;
84691 unsigned long align_offset;
84692+ unsigned long threadstack_offset;
84693 };
84694
84695-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
84696-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
84697+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
84698+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
84699
84700 /*
84701 * Search for an unmapped address range.
84702@@ -1847,7 +1862,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
84703 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
84704 */
84705 static inline unsigned long
84706-vm_unmapped_area(struct vm_unmapped_area_info *info)
84707+vm_unmapped_area(const struct vm_unmapped_area_info *info)
84708 {
84709 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
84710 return unmapped_area(info);
84711@@ -1909,6 +1924,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
84712 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
84713 struct vm_area_struct **pprev);
84714
84715+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
84716+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
84717+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
84718+
84719 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
84720 NULL if none. Assume start_addr < end_addr. */
84721 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
84722@@ -1937,15 +1956,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
84723 return vma;
84724 }
84725
84726-#ifdef CONFIG_MMU
84727-pgprot_t vm_get_page_prot(unsigned long vm_flags);
84728-#else
84729-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
84730-{
84731- return __pgprot(0);
84732-}
84733-#endif
84734-
84735 #ifdef CONFIG_NUMA_BALANCING
84736 unsigned long change_prot_numa(struct vm_area_struct *vma,
84737 unsigned long start, unsigned long end);
84738@@ -1997,6 +2007,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
84739 static inline void vm_stat_account(struct mm_struct *mm,
84740 unsigned long flags, struct file *file, long pages)
84741 {
84742+
84743+#ifdef CONFIG_PAX_RANDMMAP
84744+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
84745+#endif
84746+
84747 mm->total_vm += pages;
84748 }
84749 #endif /* CONFIG_PROC_FS */
84750@@ -2078,7 +2093,7 @@ extern int unpoison_memory(unsigned long pfn);
84751 extern int sysctl_memory_failure_early_kill;
84752 extern int sysctl_memory_failure_recovery;
84753 extern void shake_page(struct page *p, int access);
84754-extern atomic_long_t num_poisoned_pages;
84755+extern atomic_long_unchecked_t num_poisoned_pages;
84756 extern int soft_offline_page(struct page *page, int flags);
84757
84758 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
84759@@ -2113,5 +2128,11 @@ void __init setup_nr_node_ids(void);
84760 static inline void setup_nr_node_ids(void) {}
84761 #endif
84762
84763+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
84764+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
84765+#else
84766+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
84767+#endif
84768+
84769 #endif /* __KERNEL__ */
84770 #endif /* _LINUX_MM_H */
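
Among the mm.h changes above, access_process_vm()/access_remote_vm() and the vm_operations_struct access() hook move from int len to size_t len with an ssize_t return. The point is to stop silent truncation of large 64-bit lengths at these interfaces; a two-function demonstration of what the old int parameter does to a length above 2^32 (LP64 assumed):

#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

static int old_access(void *buf, int len)
{
        return len;                     /* old prototype */
}

static ssize_t new_access(void *buf, size_t len)
{
        return (ssize_t)len;            /* patched prototype */
}

int main(void)
{
        size_t big = 0x100000001ULL;    /* 4 GiB + 1 byte */

        /* Implementation-defined truncation: on LP64 this prints 1. */
        printf("int len sees:    %d\n", old_access(NULL, (int)big));
        printf("size_t len sees: %zd\n", new_access(NULL, big));
        return 0;
}
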
84771diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
84772index 96c5750..15668ba 100644
84773--- a/include/linux/mm_types.h
84774+++ b/include/linux/mm_types.h
84775@@ -308,7 +308,9 @@ struct vm_area_struct {
84776 #ifdef CONFIG_NUMA
84777 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
84778 #endif
84779-};
84780+
84781+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
84782+} __randomize_layout;
84783
84784 struct core_thread {
84785 struct task_struct *task;
84786@@ -454,7 +456,25 @@ struct mm_struct {
84787 bool tlb_flush_pending;
84788 #endif
84789 struct uprobes_state uprobes_state;
84790-};
84791+
84792+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84793+ unsigned long pax_flags;
84794+#endif
84795+
84796+#ifdef CONFIG_PAX_DLRESOLVE
84797+ unsigned long call_dl_resolve;
84798+#endif
84799+
84800+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
84801+ unsigned long call_syscall;
84802+#endif
84803+
84804+#ifdef CONFIG_PAX_ASLR
84805+ unsigned long delta_mmap; /* randomized offset */
84806+ unsigned long delta_stack; /* randomized offset */
84807+#endif
84808+
84809+} __randomize_layout;
84810
84811 static inline void mm_init_cpumask(struct mm_struct *mm)
84812 {
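
mm_struct grows per-process PaX state above: pax_flags, and under CONFIG_PAX_ASLR the randomized page deltas delta_mmap and delta_stack that the arch mmap code (patched elsewhere in this file set) adds to the mmap and stack bases. A hedged sketch of how such a page-granular delta is typically consumed; randomize_mmap_base() below is invented, only the delta-shifted-by-PAGE_SHIFT shape mirrors the PaX code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12

/* Hypothetical: pick a random page delta once per process, then bias
 * the mmap base by it, the way mm->delta_mmap is used by arch code. */
static uintptr_t randomize_mmap_base(uintptr_t base, unsigned delta_bits)
{
        uintptr_t delta_mmap = (uintptr_t)rand() & ((1UL << delta_bits) - 1);

        return base + (delta_mmap << PAGE_SHIFT);
}

int main(void)
{
        srand((unsigned)time(NULL));
        printf("randomized mmap base: %#lx\n",
               (unsigned long)randomize_mmap_base(0x40000000UL, 16));
        return 0;
}
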
84813diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
84814index c5d5278..f0b68c8 100644
84815--- a/include/linux/mmiotrace.h
84816+++ b/include/linux/mmiotrace.h
84817@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
84818 /* Called from ioremap.c */
84819 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
84820 void __iomem *addr);
84821-extern void mmiotrace_iounmap(volatile void __iomem *addr);
84822+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
84823
84824 /* For anyone to insert markers. Remember trailing newline. */
84825 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
84826@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
84827 {
84828 }
84829
84830-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
84831+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
84832 {
84833 }
84834
84835diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
84836index 6cbd1b6..b1d2f99 100644
84837--- a/include/linux/mmzone.h
84838+++ b/include/linux/mmzone.h
84839@@ -412,7 +412,7 @@ struct zone {
84840 unsigned long flags; /* zone flags, see below */
84841
84842 /* Zone statistics */
84843- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84844+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84845
84846 /*
84847 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
84848diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
84849index 44eeef0..a92d3f9 100644
84850--- a/include/linux/mod_devicetable.h
84851+++ b/include/linux/mod_devicetable.h
84852@@ -139,7 +139,7 @@ struct usb_device_id {
84853 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
84854 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
84855
84856-#define HID_ANY_ID (~0)
84857+#define HID_ANY_ID (~0U)
84858 #define HID_BUS_ANY 0xffff
84859 #define HID_GROUP_ANY 0x0000
84860
84861@@ -475,7 +475,7 @@ struct dmi_system_id {
84862 const char *ident;
84863 struct dmi_strmatch matches[4];
84864 void *driver_data;
84865-};
84866+} __do_const;
84867 /*
84868 * struct dmi_device_id appears during expansion of
84869 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
84870diff --git a/include/linux/module.h b/include/linux/module.h
84871index f520a76..5f898ef 100644
84872--- a/include/linux/module.h
84873+++ b/include/linux/module.h
84874@@ -17,9 +17,11 @@
84875 #include <linux/moduleparam.h>
84876 #include <linux/jump_label.h>
84877 #include <linux/export.h>
84878+#include <linux/fs.h>
84879
84880 #include <linux/percpu.h>
84881 #include <asm/module.h>
84882+#include <asm/pgtable.h>
84883
84884 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
84885 #define MODULE_SIG_STRING "~Module signature appended~\n"
84886@@ -42,7 +44,7 @@ struct module_kobject {
84887 struct kobject *drivers_dir;
84888 struct module_param_attrs *mp;
84889 struct completion *kobj_completion;
84890-};
84891+} __randomize_layout;
84892
84893 struct module_attribute {
84894 struct attribute attr;
84895@@ -54,12 +56,13 @@ struct module_attribute {
84896 int (*test)(struct module *);
84897 void (*free)(struct module *);
84898 };
84899+typedef struct module_attribute __no_const module_attribute_no_const;
84900
84901 struct module_version_attribute {
84902 struct module_attribute mattr;
84903 const char *module_name;
84904 const char *version;
84905-} __attribute__ ((__aligned__(sizeof(void *))));
84906+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
84907
84908 extern ssize_t __modver_version_show(struct module_attribute *,
84909 struct module_kobject *, char *);
84910@@ -235,7 +238,7 @@ struct module {
84911
84912 /* Sysfs stuff. */
84913 struct module_kobject mkobj;
84914- struct module_attribute *modinfo_attrs;
84915+ module_attribute_no_const *modinfo_attrs;
84916 const char *version;
84917 const char *srcversion;
84918 struct kobject *holders_dir;
84919@@ -284,19 +287,16 @@ struct module {
84920 int (*init)(void);
84921
84922 /* If this is non-NULL, vfree after init() returns */
84923- void *module_init;
84924+ void *module_init_rx, *module_init_rw;
84925
84926 /* Here is the actual code + data, vfree'd on unload. */
84927- void *module_core;
84928+ void *module_core_rx, *module_core_rw;
84929
84930 /* Here are the sizes of the init and core sections */
84931- unsigned int init_size, core_size;
84932+ unsigned int init_size_rw, core_size_rw;
84933
84934 /* The size of the executable code in each section. */
84935- unsigned int init_text_size, core_text_size;
84936-
84937- /* Size of RO sections of the module (text+rodata) */
84938- unsigned int init_ro_size, core_ro_size;
84939+ unsigned int init_size_rx, core_size_rx;
84940
84941 /* Arch-specific module values */
84942 struct mod_arch_specific arch;
84943@@ -352,6 +352,10 @@ struct module {
84944 #ifdef CONFIG_EVENT_TRACING
84945 struct ftrace_event_call **trace_events;
84946 unsigned int num_trace_events;
84947+ struct file_operations trace_id;
84948+ struct file_operations trace_enable;
84949+ struct file_operations trace_format;
84950+ struct file_operations trace_filter;
84951 #endif
84952 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
84953 unsigned int num_ftrace_callsites;
84954@@ -375,7 +379,7 @@ struct module {
84955 ctor_fn_t *ctors;
84956 unsigned int num_ctors;
84957 #endif
84958-};
84959+} __randomize_layout;
84960 #ifndef MODULE_ARCH_INIT
84961 #define MODULE_ARCH_INIT {}
84962 #endif
84963@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
84964 bool is_module_percpu_address(unsigned long addr);
84965 bool is_module_text_address(unsigned long addr);
84966
84967+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
84968+{
84969+
84970+#ifdef CONFIG_PAX_KERNEXEC
84971+ if (ktla_ktva(addr) >= (unsigned long)start &&
84972+ ktla_ktva(addr) < (unsigned long)start + size)
84973+ return 1;
84974+#endif
84975+
84976+ return ((void *)addr >= start && (void *)addr < start + size);
84977+}
84978+
84979+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
84980+{
84981+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
84982+}
84983+
84984+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
84985+{
84986+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
84987+}
84988+
84989+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
84990+{
84991+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
84992+}
84993+
84994+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
84995+{
84996+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
84997+}
84998+
84999 static inline int within_module_core(unsigned long addr, const struct module *mod)
85000 {
85001- return (unsigned long)mod->module_core <= addr &&
85002- addr < (unsigned long)mod->module_core + mod->core_size;
85003+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
85004 }
85005
85006 static inline int within_module_init(unsigned long addr, const struct module *mod)
85007 {
85008- return (unsigned long)mod->module_init <= addr &&
85009- addr < (unsigned long)mod->module_init + mod->init_size;
85010+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
85011 }
85012
85013 /* Search for module by name: must hold module_mutex. */
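
The module.h rework above is the KERNEXEC module layout split: one mapping for code plus rodata (module_core_rx/core_size_rx, mapped read-execute) and one for writable data (module_core_rw/core_size_rw), so the old within_module_core()/within_module_init() checks become unions of two range tests, with an extra ktla_ktva() aliased comparison under KERNEXEC. The range logic in isolation, with a simplified stand-in for struct module:

#include <stdio.h>

struct mod_layout {
        char *core_rx; unsigned long size_rx;   /* code + rodata */
        char *core_rw; unsigned long size_rw;   /* writable data  */
};

static int within(unsigned long addr, char *start, unsigned long size)
{
        return (char *)addr >= start && (char *)addr < start + size;
}

static int within_module_core(unsigned long addr, const struct mod_layout *m)
{
        return within(addr, m->core_rx, m->size_rx) ||
               within(addr, m->core_rw, m->size_rw);
}

int main(void)
{
        static char rx[64], rw[64], other[64];
        struct mod_layout m = { rx, sizeof(rx), rw, sizeof(rw) };

        printf("in rx: %d, in rw: %d, outside: %d\n",
               within_module_core((unsigned long)(rx + 8), &m),
               within_module_core((unsigned long)(rw + 8), &m),
               within_module_core((unsigned long)(other + 8), &m));
        return 0;
}
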
85014diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
85015index 560ca53..ef621ef 100644
85016--- a/include/linux/moduleloader.h
85017+++ b/include/linux/moduleloader.h
85018@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
85019 sections. Returns NULL on failure. */
85020 void *module_alloc(unsigned long size);
85021
85022+#ifdef CONFIG_PAX_KERNEXEC
85023+void *module_alloc_exec(unsigned long size);
85024+#else
85025+#define module_alloc_exec(x) module_alloc(x)
85026+#endif
85027+
85028 /* Free memory returned from module_alloc. */
85029 void module_free(struct module *mod, void *module_region);
85030
85031+#ifdef CONFIG_PAX_KERNEXEC
85032+void module_free_exec(struct module *mod, void *module_region);
85033+#else
85034+#define module_free_exec(x, y) module_free((x), (y))
85035+#endif
85036+
85037 /*
85038 * Apply the given relocation to the (simplified) ELF. Return -error
85039 * or 0.
85040@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
85041 unsigned int relsec,
85042 struct module *me)
85043 {
85044+#ifdef CONFIG_MODULES
85045 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
85046+#endif
85047 return -ENOEXEC;
85048 }
85049 #endif
85050@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
85051 unsigned int relsec,
85052 struct module *me)
85053 {
85054+#ifdef CONFIG_MODULES
85055 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
85056+#endif
85057 return -ENOEXEC;
85058 }
85059 #endif
85060diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
85061index b1990c5..2a6e611 100644
85062--- a/include/linux/moduleparam.h
85063+++ b/include/linux/moduleparam.h
85064@@ -293,7 +293,7 @@ static inline void __kernel_param_unlock(void)
85065 * @len is usually just sizeof(string).
85066 */
85067 #define module_param_string(name, string, len, perm) \
85068- static const struct kparam_string __param_string_##name \
85069+ static const struct kparam_string __param_string_##name __used \
85070 = { len, string }; \
85071 __module_param_call(MODULE_PARAM_PREFIX, name, \
85072 &param_ops_string, \
85073@@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
85074 */
85075 #define module_param_array_named(name, array, type, nump, perm) \
85076 param_check_##type(name, &(array)[0]); \
85077- static const struct kparam_array __param_arr_##name \
85078+ static const struct kparam_array __param_arr_##name __used \
85079 = { .max = ARRAY_SIZE(array), .num = nump, \
85080 .ops = &param_ops_##type, \
85081 .elemsize = sizeof(array[0]), .elem = array }; \
85082diff --git a/include/linux/mount.h b/include/linux/mount.h
85083index 839bac2..a96b37c 100644
85084--- a/include/linux/mount.h
85085+++ b/include/linux/mount.h
85086@@ -59,7 +59,7 @@ struct vfsmount {
85087 struct dentry *mnt_root; /* root of the mounted tree */
85088 struct super_block *mnt_sb; /* pointer to superblock */
85089 int mnt_flags;
85090-};
85091+} __randomize_layout;
85092
85093 struct file; /* forward dec */
85094
85095diff --git a/include/linux/namei.h b/include/linux/namei.h
85096index 492de72..1bddcd4 100644
85097--- a/include/linux/namei.h
85098+++ b/include/linux/namei.h
85099@@ -19,7 +19,7 @@ struct nameidata {
85100 unsigned seq, m_seq;
85101 int last_type;
85102 unsigned depth;
85103- char *saved_names[MAX_NESTED_LINKS + 1];
85104+ const char *saved_names[MAX_NESTED_LINKS + 1];
85105 };
85106
85107 /*
85108@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
85109
85110 extern void nd_jump_link(struct nameidata *nd, struct path *path);
85111
85112-static inline void nd_set_link(struct nameidata *nd, char *path)
85113+static inline void nd_set_link(struct nameidata *nd, const char *path)
85114 {
85115 nd->saved_names[nd->depth] = path;
85116 }
85117
85118-static inline char *nd_get_link(struct nameidata *nd)
85119+static inline const char *nd_get_link(const struct nameidata *nd)
85120 {
85121 return nd->saved_names[nd->depth];
85122 }
85123diff --git a/include/linux/net.h b/include/linux/net.h
85124index 17d8339..81656c0 100644
85125--- a/include/linux/net.h
85126+++ b/include/linux/net.h
85127@@ -192,7 +192,7 @@ struct net_proto_family {
85128 int (*create)(struct net *net, struct socket *sock,
85129 int protocol, int kern);
85130 struct module *owner;
85131-};
85132+} __do_const;
85133
85134 struct iovec;
85135 struct kvec;
85136diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
85137index 66f9a04..056078d 100644
85138--- a/include/linux/netdevice.h
85139+++ b/include/linux/netdevice.h
85140@@ -1145,6 +1145,7 @@ struct net_device_ops {
85141 void *priv);
85142 int (*ndo_get_lock_subclass)(struct net_device *dev);
85143 };
85144+typedef struct net_device_ops __no_const net_device_ops_no_const;
85145
85146 /**
85147 * enum net_device_priv_flags - &struct net_device priv_flags
85148@@ -1312,11 +1313,11 @@ struct net_device {
85149 struct net_device_stats stats;
85150
85151 /* dropped packets by core network, Do not use this in drivers */
85152- atomic_long_t rx_dropped;
85153- atomic_long_t tx_dropped;
85154+ atomic_long_unchecked_t rx_dropped;
85155+ atomic_long_unchecked_t tx_dropped;
85156
85157 /* Stats to monitor carrier on<->off transitions */
85158- atomic_t carrier_changes;
85159+ atomic_unchecked_t carrier_changes;
85160
85161 #ifdef CONFIG_WIRELESS_EXT
85162 /* List of functions to handle Wireless Extensions (instead of ioctl).
85163diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
85164index 2077489..a15e561 100644
85165--- a/include/linux/netfilter.h
85166+++ b/include/linux/netfilter.h
85167@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
85168 #endif
85169 /* Use the module struct to lock set/get code in place */
85170 struct module *owner;
85171-};
85172+} __do_const;
85173
85174 /* Function to register/unregister hook points. */
85175 int nf_register_hook(struct nf_hook_ops *reg);
85176diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
85177index e955d47..04a5338 100644
85178--- a/include/linux/netfilter/nfnetlink.h
85179+++ b/include/linux/netfilter/nfnetlink.h
85180@@ -19,7 +19,7 @@ struct nfnl_callback {
85181 const struct nlattr * const cda[]);
85182 const struct nla_policy *policy; /* netlink attribute policy */
85183 const u_int16_t attr_count; /* number of nlattr's */
85184-};
85185+} __do_const;
85186
85187 struct nfnetlink_subsystem {
85188 const char *name;
85189diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
85190new file mode 100644
85191index 0000000..33f4af8
85192--- /dev/null
85193+++ b/include/linux/netfilter/xt_gradm.h
85194@@ -0,0 +1,9 @@
85195+#ifndef _LINUX_NETFILTER_XT_GRADM_H
85196+#define _LINUX_NETFILTER_XT_GRADM_H 1
85197+
85198+struct xt_gradm_mtinfo {
85199+ __u16 flags;
85200+ __u16 invflags;
85201+};
85202+
85203+#endif
85204diff --git a/include/linux/nls.h b/include/linux/nls.h
85205index 520681b..2b7fabb 100644
85206--- a/include/linux/nls.h
85207+++ b/include/linux/nls.h
85208@@ -31,7 +31,7 @@ struct nls_table {
85209 const unsigned char *charset2upper;
85210 struct module *owner;
85211 struct nls_table *next;
85212-};
85213+} __do_const;
85214
85215 /* this value hold the maximum octet of charset */
85216 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
85217@@ -46,7 +46,7 @@ enum utf16_endian {
85218 /* nls_base.c */
85219 extern int __register_nls(struct nls_table *, struct module *);
85220 extern int unregister_nls(struct nls_table *);
85221-extern struct nls_table *load_nls(char *);
85222+extern struct nls_table *load_nls(const char *);
85223 extern void unload_nls(struct nls_table *);
85224 extern struct nls_table *load_nls_default(void);
85225 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
85226diff --git a/include/linux/notifier.h b/include/linux/notifier.h
85227index d14a4c3..a078786 100644
85228--- a/include/linux/notifier.h
85229+++ b/include/linux/notifier.h
85230@@ -54,7 +54,8 @@ struct notifier_block {
85231 notifier_fn_t notifier_call;
85232 struct notifier_block __rcu *next;
85233 int priority;
85234-};
85235+} __do_const;
85236+typedef struct notifier_block __no_const notifier_block_no_const;
85237
85238 struct atomic_notifier_head {
85239 spinlock_t lock;
85240diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
85241index b2a0f15..4d7da32 100644
85242--- a/include/linux/oprofile.h
85243+++ b/include/linux/oprofile.h
85244@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
85245 int oprofilefs_create_ro_ulong(struct dentry * root,
85246 char const * name, ulong * val);
85247
85248-/** Create a file for read-only access to an atomic_t. */
85249+/** Create a file for read-only access to an atomic_unchecked_t. */
85250 int oprofilefs_create_ro_atomic(struct dentry * root,
85251- char const * name, atomic_t * val);
85252+ char const * name, atomic_unchecked_t * val);
85253
85254 /** create a directory */
85255 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
85256diff --git a/include/linux/padata.h b/include/linux/padata.h
85257index 4386946..f50c615 100644
85258--- a/include/linux/padata.h
85259+++ b/include/linux/padata.h
85260@@ -129,7 +129,7 @@ struct parallel_data {
85261 struct padata_serial_queue __percpu *squeue;
85262 atomic_t reorder_objects;
85263 atomic_t refcnt;
85264- atomic_t seq_nr;
85265+ atomic_unchecked_t seq_nr;
85266 struct padata_cpumask cpumask;
85267 spinlock_t lock ____cacheline_aligned;
85268 unsigned int processed;
85269diff --git a/include/linux/path.h b/include/linux/path.h
85270index d137218..be0c176 100644
85271--- a/include/linux/path.h
85272+++ b/include/linux/path.h
85273@@ -1,13 +1,15 @@
85274 #ifndef _LINUX_PATH_H
85275 #define _LINUX_PATH_H
85276
85277+#include <linux/compiler.h>
85278+
85279 struct dentry;
85280 struct vfsmount;
85281
85282 struct path {
85283 struct vfsmount *mnt;
85284 struct dentry *dentry;
85285-};
85286+} __randomize_layout;
85287
85288 extern void path_get(const struct path *);
85289 extern void path_put(const struct path *);
85290diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
85291index 5f2e559..7d59314 100644
85292--- a/include/linux/pci_hotplug.h
85293+++ b/include/linux/pci_hotplug.h
85294@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
85295 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
85296 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
85297 int (*reset_slot) (struct hotplug_slot *slot, int probe);
85298-};
85299+} __do_const;
85300+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
85301
85302 /**
85303 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
85304diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
85305index 707617a..28a2e7e 100644
85306--- a/include/linux/perf_event.h
85307+++ b/include/linux/perf_event.h
85308@@ -339,8 +339,8 @@ struct perf_event {
85309
85310 enum perf_event_active_state state;
85311 unsigned int attach_state;
85312- local64_t count;
85313- atomic64_t child_count;
85314+ local64_t count; /* PaX: fix it one day */
85315+ atomic64_unchecked_t child_count;
85316
85317 /*
85318 * These are the total time in nanoseconds that the event
85319@@ -391,8 +391,8 @@ struct perf_event {
85320 * These accumulate total time (in nanoseconds) that children
85321 * events have been enabled and running, respectively.
85322 */
85323- atomic64_t child_total_time_enabled;
85324- atomic64_t child_total_time_running;
85325+ atomic64_unchecked_t child_total_time_enabled;
85326+ atomic64_unchecked_t child_total_time_running;
85327
85328 /*
85329 * Protect attach/detach and child_list:
85330@@ -722,7 +722,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
85331 entry->ip[entry->nr++] = ip;
85332 }
85333
85334-extern int sysctl_perf_event_paranoid;
85335+extern int sysctl_perf_event_legitimately_concerned;
85336 extern int sysctl_perf_event_mlock;
85337 extern int sysctl_perf_event_sample_rate;
85338 extern int sysctl_perf_cpu_time_max_percent;
85339@@ -737,19 +737,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
85340 loff_t *ppos);
85341
85342
85343+static inline bool perf_paranoid_any(void)
85344+{
85345+ return sysctl_perf_event_legitimately_concerned > 2;
85346+}
85347+
85348 static inline bool perf_paranoid_tracepoint_raw(void)
85349 {
85350- return sysctl_perf_event_paranoid > -1;
85351+ return sysctl_perf_event_legitimately_concerned > -1;
85352 }
85353
85354 static inline bool perf_paranoid_cpu(void)
85355 {
85356- return sysctl_perf_event_paranoid > 0;
85357+ return sysctl_perf_event_legitimately_concerned > 0;
85358 }
85359
85360 static inline bool perf_paranoid_kernel(void)
85361 {
85362- return sysctl_perf_event_paranoid > 1;
85363+ return sysctl_perf_event_legitimately_concerned > 1;
85364 }
85365
85366 extern void perf_event_init(void);
85367@@ -880,7 +885,7 @@ struct perf_pmu_events_attr {
85368 struct device_attribute attr;
85369 u64 id;
85370 const char *event_str;
85371-};
85372+} __do_const;
85373
85374 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
85375 static struct perf_pmu_events_attr _var = { \
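
Besides the deliberately tongue-in-cheek rename of sysctl_perf_event_paranoid, the hunk above adds a paranoia level beyond 2 via perf_paranoid_any(); elsewhere in this patch, GRKERNSEC_PERF_HARDEN uses that level to refuse perf_event_open() to unprivileged users outright. The threshold ladder in plain C:

#include <stdio.h>

static int sysctl_perf_event_legitimately_concerned = 3;

static int perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_legitimately_concerned > -1;
}

static int perf_paranoid_cpu(void)
{
        return sysctl_perf_event_legitimately_concerned > 0;
}

static int perf_paranoid_kernel(void)
{
        return sysctl_perf_event_legitimately_concerned > 1;
}

static int perf_paranoid_any(void)
{
        return sysctl_perf_event_legitimately_concerned > 2;
}

int main(void)
{
        printf("tracepoint_raw=%d cpu=%d kernel=%d any=%d\n",
               perf_paranoid_tracepoint_raw(), perf_paranoid_cpu(),
               perf_paranoid_kernel(), perf_paranoid_any());
        return 0;
}
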
85376diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
85377index 7246ef3..1539ea4 100644
85378--- a/include/linux/pid_namespace.h
85379+++ b/include/linux/pid_namespace.h
85380@@ -43,7 +43,7 @@ struct pid_namespace {
85381 int hide_pid;
85382 int reboot; /* group exit code if this pidns was rebooted */
85383 unsigned int proc_inum;
85384-};
85385+} __randomize_layout;
85386
85387 extern struct pid_namespace init_pid_ns;
85388
85389diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
85390index eb8b8ac..62649e1 100644
85391--- a/include/linux/pipe_fs_i.h
85392+++ b/include/linux/pipe_fs_i.h
85393@@ -47,10 +47,10 @@ struct pipe_inode_info {
85394 struct mutex mutex;
85395 wait_queue_head_t wait;
85396 unsigned int nrbufs, curbuf, buffers;
85397- unsigned int readers;
85398- unsigned int writers;
85399- unsigned int files;
85400- unsigned int waiting_writers;
85401+ atomic_t readers;
85402+ atomic_t writers;
85403+ atomic_t files;
85404+ atomic_t waiting_writers;
85405 unsigned int r_counter;
85406 unsigned int w_counter;
85407 struct page *tmp_page;
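
Turning the pipe reader/writer/file counters into atomic_t above removes lost-update races on open/close paths that bump and drop them without one common lock. The failure mode being closed is the classic unsynchronized ++, easy to reproduce in userspace; in this demo the plain counter usually ends up short of 2000000 while the atomic one never does:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned int plain;      /* like the old "unsigned int readers" */
static atomic_uint  counted;    /* like the patched atomic_t */

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 1000000; i++) {
                plain++;        /* deliberately racy: lost updates */
                atomic_fetch_add_explicit(&counted, 1,
                                          memory_order_relaxed);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("plain=%u atomic=%u\n", plain, atomic_load(&counted));
        return 0;
}
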
85408diff --git a/include/linux/pm.h b/include/linux/pm.h
85409index 72c0fe0..26918ed 100644
85410--- a/include/linux/pm.h
85411+++ b/include/linux/pm.h
85412@@ -620,6 +620,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
85413 struct dev_pm_domain {
85414 struct dev_pm_ops ops;
85415 };
85416+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
85417
85418 /*
85419 * The PM_EVENT_ messages are also used by drivers implementing the legacy
85420diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
85421index 7c1d252..0e7061d 100644
85422--- a/include/linux/pm_domain.h
85423+++ b/include/linux/pm_domain.h
85424@@ -44,11 +44,11 @@ struct gpd_dev_ops {
85425 int (*thaw_early)(struct device *dev);
85426 int (*thaw)(struct device *dev);
85427 bool (*active_wakeup)(struct device *dev);
85428-};
85429+} __no_const;
85430
85431 struct gpd_cpu_data {
85432 unsigned int saved_exit_latency;
85433- struct cpuidle_state *idle_state;
85434+ cpuidle_state_no_const *idle_state;
85435 };
85436
85437 struct generic_pm_domain {
85438diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
85439index 43fd671..08c96ee 100644
85440--- a/include/linux/pm_runtime.h
85441+++ b/include/linux/pm_runtime.h
85442@@ -118,7 +118,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
85443
85444 static inline void pm_runtime_mark_last_busy(struct device *dev)
85445 {
85446- ACCESS_ONCE(dev->power.last_busy) = jiffies;
85447+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
85448 }
85449
85450 #else /* !CONFIG_PM_RUNTIME */
85451diff --git a/include/linux/pnp.h b/include/linux/pnp.h
85452index 195aafc..49a7bc2 100644
85453--- a/include/linux/pnp.h
85454+++ b/include/linux/pnp.h
85455@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
85456 struct pnp_fixup {
85457 char id[7];
85458 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
85459-};
85460+} __do_const;
85461
85462 /* config parameters */
85463 #define PNP_CONFIG_NORMAL 0x0001
85464diff --git a/include/linux/poison.h b/include/linux/poison.h
85465index 2110a81..13a11bb 100644
85466--- a/include/linux/poison.h
85467+++ b/include/linux/poison.h
85468@@ -19,8 +19,8 @@
85469 * under normal circumstances, used to verify that nobody uses
85470 * non-initialized list entries.
85471 */
85472-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
85473-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
85474+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
85475+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
85476
85477 /********** include/linux/timer.h **********/
85478 /*
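
The stock LIST_POISON values sit near 1 MiB and 2 MiB, addresses a process may manage to mmap when mmap_min_addr is lowered, so a dereference of a dangling (poisoned) list pointer can be steered into attacker-controlled data; the replacements point at the top of the 32-bit address range, beyond what i386 userland can map (note they also drop the POISON_POINTER_DELTA offset the stock macros used, likely because the 32-bit case is the one PaX targets directly here). What the poisoning protects, in miniature:

#include <stdio.h>

#define LIST_POISON1 ((void *)(long)0xFFFFFF01)
#define LIST_POISON2 ((void *)(long)0xFFFFFF02)

struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        /* Poison the dangling pointers: a later use-after-del walks
         * into an address userland should not be able to map. */
        entry->next = (struct list_head *)LIST_POISON1;
        entry->prev = (struct list_head *)LIST_POISON2;
}

int main(void)
{
        struct list_head head, a;

        head.next = head.prev = &a;
        a.next = a.prev = &head;

        list_del(&a);
        printf("poisoned: next=%p prev=%p\n",
               (void *)a.next, (void *)a.prev);
        return 0;
}
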
85479diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
85480index d8b187c3..9a9257a 100644
85481--- a/include/linux/power/smartreflex.h
85482+++ b/include/linux/power/smartreflex.h
85483@@ -238,7 +238,7 @@ struct omap_sr_class_data {
85484 int (*notify)(struct omap_sr *sr, u32 status);
85485 u8 notify_flags;
85486 u8 class_type;
85487-};
85488+} __do_const;
85489
85490 /**
85491 * struct omap_sr_nvalue_table - Smartreflex n-target value info
85492diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
85493index 4ea1d37..80f4b33 100644
85494--- a/include/linux/ppp-comp.h
85495+++ b/include/linux/ppp-comp.h
85496@@ -84,7 +84,7 @@ struct compressor {
85497 struct module *owner;
85498 /* Extra skb space needed by the compressor algorithm */
85499 unsigned int comp_extra;
85500-};
85501+} __do_const;
85502
85503 /*
85504 * The return value from decompress routine is the length of the
85505diff --git a/include/linux/preempt.h b/include/linux/preempt.h
85506index de83b4e..c4b997d 100644
85507--- a/include/linux/preempt.h
85508+++ b/include/linux/preempt.h
85509@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
85510 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
85511 #endif
85512
85513+#define raw_preempt_count_add(val) __preempt_count_add(val)
85514+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
85515+
85516 #define __preempt_count_inc() __preempt_count_add(1)
85517 #define __preempt_count_dec() __preempt_count_sub(1)
85518
85519 #define preempt_count_inc() preempt_count_add(1)
85520+#define raw_preempt_count_inc() raw_preempt_count_add(1)
85521 #define preempt_count_dec() preempt_count_sub(1)
85522+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
85523
85524 #ifdef CONFIG_PREEMPT_COUNT
85525
85526@@ -41,6 +46,12 @@ do { \
85527 barrier(); \
85528 } while (0)
85529
85530+#define raw_preempt_disable() \
85531+do { \
85532+ raw_preempt_count_inc(); \
85533+ barrier(); \
85534+} while (0)
85535+
85536 #define sched_preempt_enable_no_resched() \
85537 do { \
85538 barrier(); \
85539@@ -49,6 +60,12 @@ do { \
85540
85541 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
85542
85543+#define raw_preempt_enable_no_resched() \
85544+do { \
85545+ barrier(); \
85546+ raw_preempt_count_dec(); \
85547+} while (0)
85548+
85549 #ifdef CONFIG_PREEMPT
85550 #define preempt_enable() \
85551 do { \
85552@@ -113,8 +130,10 @@ do { \
85553 * region.
85554 */
85555 #define preempt_disable() barrier()
85556+#define raw_preempt_disable() barrier()
85557 #define sched_preempt_enable_no_resched() barrier()
85558 #define preempt_enable_no_resched() barrier()
85559+#define raw_preempt_enable_no_resched() barrier()
85560 #define preempt_enable() barrier()
85561 #define preempt_check_resched() do { } while (0)
85562
85563@@ -128,11 +147,13 @@ do { \
85564 /*
85565 * Modules have no business playing preemption tricks.
85566 */
85567+#ifndef CONFIG_PAX_KERNEXEC
85568 #undef sched_preempt_enable_no_resched
85569 #undef preempt_enable_no_resched
85570 #undef preempt_enable_no_resched_notrace
85571 #undef preempt_check_resched
85572 #endif
85573+#endif
85574
85575 #define preempt_set_need_resched() \
85576 do { \
85577diff --git a/include/linux/printk.h b/include/linux/printk.h
85578index 319ff7e..608849a 100644
85579--- a/include/linux/printk.h
85580+++ b/include/linux/printk.h
85581@@ -121,6 +121,8 @@ static inline __printf(1, 2) __cold
85582 void early_printk(const char *s, ...) { }
85583 #endif
85584
85585+extern int kptr_restrict;
85586+
85587 #ifdef CONFIG_PRINTK
85588 asmlinkage __printf(5, 0)
85589 int vprintk_emit(int facility, int level,
85590@@ -155,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
85591
85592 extern int printk_delay_msec;
85593 extern int dmesg_restrict;
85594-extern int kptr_restrict;
85595
85596 extern void wake_up_klogd(void);
85597
85598diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
85599index 9d117f6..d832b31 100644
85600--- a/include/linux/proc_fs.h
85601+++ b/include/linux/proc_fs.h
85602@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
85603 extern struct proc_dir_entry *proc_symlink(const char *,
85604 struct proc_dir_entry *, const char *);
85605 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
85606+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
85607 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
85608 struct proc_dir_entry *, void *);
85609+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
85610+ struct proc_dir_entry *, void *);
85611 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
85612 struct proc_dir_entry *);
85613
85614@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
85615 return proc_create_data(name, mode, parent, proc_fops, NULL);
85616 }
85617
85618+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
85619+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
85620+{
85621+#ifdef CONFIG_GRKERNSEC_PROC_USER
85622+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
85623+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85624+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
85625+#else
85626+ return proc_create_data(name, mode, parent, proc_fops, NULL);
85627+#endif
85628+}
85629+
85630+
85631 extern void proc_set_size(struct proc_dir_entry *, loff_t);
85632 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
85633 extern void *PDE_DATA(const struct inode *);
85634@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
85635 struct proc_dir_entry *parent,const char *dest) { return NULL;}
85636 static inline struct proc_dir_entry *proc_mkdir(const char *name,
85637 struct proc_dir_entry *parent) {return NULL;}
85638+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
85639+ struct proc_dir_entry *parent) { return NULL; }
85640 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
85641 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85642+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
85643+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85644 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
85645 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
85646 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
85647@@ -77,7 +97,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
85648 static inline struct proc_dir_entry *proc_net_mkdir(
85649 struct net *net, const char *name, struct proc_dir_entry *parent)
85650 {
85651- return proc_mkdir_data(name, 0, parent, net);
85652+ return proc_mkdir_data_restrict(name, 0, parent, net);
85653 }
85654
85655 #endif /* _LINUX_PROC_FS_H */
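
proc_create_grsec() (and the proc_mkdir_*_restrict() variants above) clamp the mode of selected /proc entries according to the grsec proc restrictions: owner-only under GRKERNSEC_PROC_USER, owner plus the configured proc group under GRKERNSEC_PROC_USERGROUP, and the caller's requested mode otherwise. The selection logic on its own, with the Kconfig symbol defined by hand for the demo:

#include <stdio.h>
#include <sys/stat.h>

#define CONFIG_GRKERNSEC_PROC_USERGROUP 1   /* demo: pretend Kconfig set it */

static mode_t grsec_proc_mode(mode_t requested)
{
#if defined(CONFIG_GRKERNSEC_PROC_USER)
        return S_IRUSR;                     /* 0400: owner only */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
        return S_IRUSR | S_IRGRP;           /* 0440: owner + proc gid */
#else
        return requested;                   /* unrestricted kernels */
#endif
}

int main(void)
{
        printf("requested 0444, effective %04o\n",
               (unsigned int)grsec_proc_mode(0444));
        return 0;
}
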
85656diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
85657index 34a1e10..70f6bde 100644
85658--- a/include/linux/proc_ns.h
85659+++ b/include/linux/proc_ns.h
85660@@ -14,7 +14,7 @@ struct proc_ns_operations {
85661 void (*put)(void *ns);
85662 int (*install)(struct nsproxy *nsproxy, void *ns);
85663 unsigned int (*inum)(void *ns);
85664-};
85665+} __do_const __randomize_layout;
85666
85667 struct proc_ns {
85668 void *ns;
85669diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
85670index 7dfed71..1dc420b 100644
85671--- a/include/linux/ptp_classify.h
85672+++ b/include/linux/ptp_classify.h
85673@@ -23,8 +23,15 @@
85674 #ifndef _PTP_CLASSIFY_H_
85675 #define _PTP_CLASSIFY_H_
85676
85677+#include <linux/if_ether.h>
85678+#include <linux/if_vlan.h>
85679 #include <linux/ip.h>
85680-#include <linux/skbuff.h>
85681+#include <linux/filter.h>
85682+#ifdef __KERNEL__
85683+#include <linux/in.h>
85684+#else
85685+#include <netinet/in.h>
85686+#endif
85687
85688 #define PTP_CLASS_NONE 0x00 /* not a PTP event message */
85689 #define PTP_CLASS_V1 0x01 /* protocol version 1 */
85690@@ -37,7 +44,7 @@
85691 #define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */
85692
85693 #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
85694-#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
85695+#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /*probably DNE*/
85696 #define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4)
85697 #define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6)
85698 #define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2)
85699@@ -46,34 +53,88 @@
85700 #define PTP_EV_PORT 319
85701 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
85702
85703+#define OFF_ETYPE 12
85704+#define OFF_IHL 14
85705+#define OFF_FRAG 20
85706+#define OFF_PROTO4 23
85707+#define OFF_NEXT 6
85708+#define OFF_UDP_DST 2
85709+
85710 #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
85711 #define OFF_PTP_SEQUENCE_ID 30
85712 #define OFF_PTP_CONTROL 32 /* PTPv1 only */
85713
85714-/* Below defines should actually be removed at some point in time. */
85715+#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
85716+
85717 #define IP6_HLEN 40
85718 #define UDP_HLEN 8
85719-#define OFF_IHL 14
85720+
85721+#define RELOFF_DST4 (ETH_HLEN + OFF_UDP_DST)
85722+#define OFF_DST6 (ETH_HLEN + IP6_HLEN + OFF_UDP_DST)
85723 #define OFF_PTP6 (ETH_HLEN + IP6_HLEN + UDP_HLEN)
85724-#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
85725
85726-#if defined(CONFIG_NET_PTP_CLASSIFY)
85727-/**
85728- * ptp_classify_raw - classify a PTP packet
85729- * @skb: buffer
85730- *
85731- * Runs a minimal BPF dissector to classify a network packet to
85732- * determine the PTP class. In case the skb does not contain any
85733- * PTP protocol data, PTP_CLASS_NONE will be returned, otherwise
85734- * PTP_CLASS_V1_IPV{4,6}, PTP_CLASS_V2_IPV{4,6} or
85735- * PTP_CLASS_V2_{L2,VLAN}, depending on the packet content.
85736- */
85737-unsigned int ptp_classify_raw(const struct sk_buff *skb);
85738+#define OP_AND (BPF_ALU | BPF_AND | BPF_K)
85739+#define OP_JEQ (BPF_JMP | BPF_JEQ | BPF_K)
85740+#define OP_JSET (BPF_JMP | BPF_JSET | BPF_K)
85741+#define OP_LDB (BPF_LD | BPF_B | BPF_ABS)
85742+#define OP_LDH (BPF_LD | BPF_H | BPF_ABS)
85743+#define OP_LDHI (BPF_LD | BPF_H | BPF_IND)
85744+#define OP_LDX (BPF_LDX | BPF_B | BPF_MSH)
85745+#define OP_OR (BPF_ALU | BPF_OR | BPF_K)
85746+#define OP_RETA (BPF_RET | BPF_A)
85747+#define OP_RETK (BPF_RET | BPF_K)
85748
85749-void __init ptp_classifier_init(void);
85750-#else
85751-static inline void ptp_classifier_init(void)
85752+static inline int ptp_filter_init(struct sock_filter *f, int len)
85753 {
85754+ if (OP_LDH == f[0].code)
85755+ return sk_chk_filter(f, len);
85756+ else
85757+ return 0;
85758 }
85759+
85760+#define PTP_FILTER \
85761+ {OP_LDH, 0, 0, OFF_ETYPE }, /* */ \
85762+ {OP_JEQ, 0, 12, ETH_P_IP }, /* f goto L20 */ \
85763+ {OP_LDB, 0, 0, OFF_PROTO4 }, /* */ \
85764+ {OP_JEQ, 0, 9, IPPROTO_UDP }, /* f goto L10 */ \
85765+ {OP_LDH, 0, 0, OFF_FRAG }, /* */ \
85766+ {OP_JSET, 7, 0, 0x1fff }, /* t goto L11 */ \
85767+ {OP_LDX, 0, 0, OFF_IHL }, /* */ \
85768+ {OP_LDHI, 0, 0, RELOFF_DST4 }, /* */ \
85769+ {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L12 */ \
85770+ {OP_LDHI, 0, 0, ETH_HLEN + UDP_HLEN }, /* */ \
85771+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
85772+ {OP_OR, 0, 0, PTP_CLASS_IPV4 }, /* */ \
85773+ {OP_RETA, 0, 0, 0 }, /* */ \
85774+/*L1x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
85775+/*L20*/ {OP_JEQ, 0, 9, ETH_P_IPV6 }, /* f goto L40 */ \
85776+ {OP_LDB, 0, 0, ETH_HLEN + OFF_NEXT }, /* */ \
85777+ {OP_JEQ, 0, 6, IPPROTO_UDP }, /* f goto L30 */ \
85778+ {OP_LDH, 0, 0, OFF_DST6 }, /* */ \
85779+ {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L31 */ \
85780+ {OP_LDH, 0, 0, OFF_PTP6 }, /* */ \
85781+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
85782+ {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \
85783+ {OP_RETA, 0, 0, 0 }, /* */ \
85784+/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
85785+/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \
85786+ {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \
85787+ {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \
85788+ {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
85789+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
85790+ {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \
85791+ {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
85792+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
85793+ {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \
85794+ {OP_RETA, 0, 0, 0 }, /* */ \
85795+/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \
85796+ {OP_LDB, 0, 0, ETH_HLEN }, /* */ \
85797+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
85798+ {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \
85799+ {OP_LDH, 0, 0, ETH_HLEN }, /* */ \
85800+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
85801+ {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
85802+ {OP_RETA, 0, 0, 0 }, /* */ \
85803+/*L6x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE },
85804+
85805 #endif
85806-#endif /* _PTP_CLASSIFY_H_ */
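[Note on the ptp_classify.h hunk above: it reverts the classifier to the pre-3.14 classic-BPF array form. A minimal sketch of how PHY timestamping drivers consumed the header in that form; the driver-side names are assumptions modelled on that era's API, not part of this patch:]

#include <linux/filter.h>
#include <linux/ptp_classify.h>

/* One classic-BPF program instance per driver, built from the macro. */
static struct sock_filter ptp_filter[] = { PTP_FILTER };

static int classifier_setup(void)
{
	/* Validates the program; the OP_LDH guard in ptp_filter_init()
	 * makes a second call a no-op once the checker has rewritten
	 * the opcodes. */
	return ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter));
}

static unsigned int classify(const struct sk_buff *skb)
{
	/* Runs the program; yields a PTP_CLASS_* value or PTP_CLASS_NONE. */
	return sk_run_filter(skb, ptp_filter);
}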
85807diff --git a/include/linux/quota.h b/include/linux/quota.h
85808index 0f3c5d3..bc559e3 100644
85809--- a/include/linux/quota.h
85810+++ b/include/linux/quota.h
85811@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
85812
85813 extern bool qid_eq(struct kqid left, struct kqid right);
85814 extern bool qid_lt(struct kqid left, struct kqid right);
85815-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
85816+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
85817 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
85818 extern bool qid_valid(struct kqid qid);
85819
85820diff --git a/include/linux/random.h b/include/linux/random.h
85821index 57fbbff..2170304 100644
85822--- a/include/linux/random.h
85823+++ b/include/linux/random.h
85824@@ -9,9 +9,19 @@
85825 #include <uapi/linux/random.h>
85826
85827 extern void add_device_randomness(const void *, unsigned int);
85828+
85829+static inline void add_latent_entropy(void)
85830+{
85831+
85832+#ifdef LATENT_ENTROPY_PLUGIN
85833+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
85834+#endif
85835+
85836+}
85837+
85838 extern void add_input_randomness(unsigned int type, unsigned int code,
85839- unsigned int value);
85840-extern void add_interrupt_randomness(int irq, int irq_flags);
85841+ unsigned int value) __latent_entropy;
85842+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
85843
85844 extern void get_random_bytes(void *buf, int nbytes);
85845 extern void get_random_bytes_arch(void *buf, int nbytes);
85846@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
85847 extern const struct file_operations random_fops, urandom_fops;
85848 #endif
85849
85850-unsigned int get_random_int(void);
85851+unsigned int __intentional_overflow(-1) get_random_int(void);
85852 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
85853
85854-u32 prandom_u32(void);
85855+u32 prandom_u32(void) __intentional_overflow(-1);
85856 void prandom_bytes(void *buf, int nbytes);
85857 void prandom_seed(u32 seed);
85858 void prandom_reseed_late(void);
85859@@ -37,6 +47,11 @@ struct rnd_state {
85860 u32 prandom_u32_state(struct rnd_state *state);
85861 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
85862
85863+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
85864+{
85865+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
85866+}
85867+
85868 /**
85869 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
85870 * @ep_ro: right open interval endpoint
85871@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
85872 *
85873 * Returns: pseudo-random number in interval [0, ep_ro)
85874 */
85875-static inline u32 prandom_u32_max(u32 ep_ro)
85876+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
85877 {
85878 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
85879 }
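[Note: the __intentional_overflow(-1) markers added above, and on from_kqid() earlier, annotate functions whose return value may legitimately wrap, so the size_overflow GCC plugin does not flag instrumented callers. A sketch of how the attribute is layered, following the compiler.h/compiler-gcc*.h pattern this patch uses elsewhere (shown as an assumption, abbreviated):]

/* When the plugin is active, the annotation becomes a real attribute: */
#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#endif

/* Otherwise a no-op fallback keeps the declarations compilable: */
#ifndef __intentional_overflow
# define __intentional_overflow(...)
#endif

/* Usage as in this hunk: the PRNG legitimately spans the full u32 range. */
u32 prandom_u32(void) __intentional_overflow(-1);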
85880diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
85881index fea49b5..2ac22bb 100644
85882--- a/include/linux/rbtree_augmented.h
85883+++ b/include/linux/rbtree_augmented.h
85884@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
85885 old->rbaugmented = rbcompute(old); \
85886 } \
85887 rbstatic const struct rb_augment_callbacks rbname = { \
85888- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
85889+ .propagate = rbname ## _propagate, \
85890+ .copy = rbname ## _copy, \
85891+ .rotate = rbname ## _rotate \
85892 };
85893
85894
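[Note: the rbtree_augmented.h hunk replaces positional initializers with designated ones, which is what lets plugins such as constify and randstruct reorder or const-qualify the callback struct without silently mis-binding members. A minimal userspace sketch of the difference:]

#include <stdio.h>

struct callbacks {
	void (*propagate)(void);
	void (*copy)(void);
	void (*rotate)(void);
};

static void do_propagate(void) { puts("propagate"); }
static void do_copy(void)      { puts("copy"); }
static void do_rotate(void)    { puts("rotate"); }

/* Positional: correctness silently depends on member order. */
static const struct callbacks positional = { do_propagate, do_copy, do_rotate };

/* Designated: still correct if the struct layout is reshuffled. */
static const struct callbacks designated = {
	.propagate = do_propagate,
	.copy      = do_copy,
	.rotate    = do_rotate,
};

int main(void)
{
	positional.copy();
	designated.rotate();
	return 0;
}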
85895diff --git a/include/linux/rculist.h b/include/linux/rculist.h
85896index 8183b46..a388711 100644
85897--- a/include/linux/rculist.h
85898+++ b/include/linux/rculist.h
85899@@ -29,8 +29,8 @@
85900 */
85901 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
85902 {
85903- ACCESS_ONCE(list->next) = list;
85904- ACCESS_ONCE(list->prev) = list;
85905+ ACCESS_ONCE_RW(list->next) = list;
85906+ ACCESS_ONCE_RW(list->prev) = list;
85907 }
85908
85909 /*
85910@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
85911 struct list_head *prev, struct list_head *next);
85912 #endif
85913
85914+void __pax_list_add_rcu(struct list_head *new,
85915+ struct list_head *prev, struct list_head *next);
85916+
85917 /**
85918 * list_add_rcu - add a new entry to rcu-protected list
85919 * @new: new entry to be added
85920@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
85921 __list_add_rcu(new, head, head->next);
85922 }
85923
85924+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
85925+{
85926+ __pax_list_add_rcu(new, head, head->next);
85927+}
85928+
85929 /**
85930 * list_add_tail_rcu - add a new entry to rcu-protected list
85931 * @new: new entry to be added
85932@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
85933 __list_add_rcu(new, head->prev, head);
85934 }
85935
85936+static inline void pax_list_add_tail_rcu(struct list_head *new,
85937+ struct list_head *head)
85938+{
85939+ __pax_list_add_rcu(new, head->prev, head);
85940+}
85941+
85942 /**
85943 * list_del_rcu - deletes entry from list without re-initialization
85944 * @entry: the element to delete from the list.
85945@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
85946 entry->prev = LIST_POISON2;
85947 }
85948
85949+extern void pax_list_del_rcu(struct list_head *entry);
85950+
85951 /**
85952 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
85953 * @n: the element to delete from the hash list.
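[Note: pax_list_add_rcu() and pax_list_del_rcu() exist because some lists live in data that KERNEXEC/constify keep read-only at runtime; plain list_add_rcu() would fault on the write. A hypothetical sketch of the helper's shape, bracketing the standard RCU list writes with the PaX open/close helpers; the actual implementation elsewhere in this patch may differ:]

void __pax_list_add_rcu(struct list_head *new,
			struct list_head *prev, struct list_head *next)
{
	pax_open_kernel();	/* temporarily lift read-only protection */
	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
	pax_close_kernel();	/* restore it */
}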
85954diff --git a/include/linux/reboot.h b/include/linux/reboot.h
85955index 48bf152..d38b785 100644
85956--- a/include/linux/reboot.h
85957+++ b/include/linux/reboot.h
85958@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
85959 */
85960
85961 extern void migrate_to_reboot_cpu(void);
85962-extern void machine_restart(char *cmd);
85963-extern void machine_halt(void);
85964-extern void machine_power_off(void);
85965+extern void machine_restart(char *cmd) __noreturn;
85966+extern void machine_halt(void) __noreturn;
85967+extern void machine_power_off(void) __noreturn;
85968
85969 extern void machine_shutdown(void);
85970 struct pt_regs;
85971@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
85972 */
85973
85974 extern void kernel_restart_prepare(char *cmd);
85975-extern void kernel_restart(char *cmd);
85976-extern void kernel_halt(void);
85977-extern void kernel_power_off(void);
85978+extern void kernel_restart(char *cmd) __noreturn;
85979+extern void kernel_halt(void) __noreturn;
85980+extern void kernel_power_off(void) __noreturn;
85981
85982 extern int C_A_D; /* for sysctl */
85983 void ctrl_alt_del(void);
85984@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
85985 * Emergency restart, callable from an interrupt handler.
85986 */
85987
85988-extern void emergency_restart(void);
85989+extern void emergency_restart(void) __noreturn;
85990 #include <asm/emergency-restart.h>
85991
85992 #endif /* _LINUX_REBOOT_H */
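[Note: tagging machine_halt() and the other terminal entry points __noreturn lets the compiler prune dead code behind them and catch paths that could fall through. A small userspace demonstration of the attribute:]

#include <stdio.h>
#include <stdlib.h>

/* With the attribute, the compiler knows control never comes back. */
__attribute__((noreturn)) static void die(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);
}

static int must_be_positive(int v)
{
	if (v <= 0)
		die("bad value");
	return v;	/* no "control reaches end of non-void" warning */
}

int main(void)
{
	printf("%d\n", must_be_positive(3));
	return 0;
}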
85993diff --git a/include/linux/regset.h b/include/linux/regset.h
85994index 8e0c9fe..ac4d221 100644
85995--- a/include/linux/regset.h
85996+++ b/include/linux/regset.h
85997@@ -161,7 +161,8 @@ struct user_regset {
85998 unsigned int align;
85999 unsigned int bias;
86000 unsigned int core_note_type;
86001-};
86002+} __do_const;
86003+typedef struct user_regset __no_const user_regset_no_const;
86004
86005 /**
86006 * struct user_regset_view - available regsets
86007diff --git a/include/linux/relay.h b/include/linux/relay.h
86008index d7c8359..818daf5 100644
86009--- a/include/linux/relay.h
86010+++ b/include/linux/relay.h
86011@@ -157,7 +157,7 @@ struct rchan_callbacks
86012 * The callback should return 0 if successful, negative if not.
86013 */
86014 int (*remove_buf_file)(struct dentry *dentry);
86015-};
86016+} __no_const;
86017
86018 /*
86019 * CONFIG_RELAY kernel API, kernel/relay.c
86020diff --git a/include/linux/rio.h b/include/linux/rio.h
86021index 6bda06f..bf39a9b 100644
86022--- a/include/linux/rio.h
86023+++ b/include/linux/rio.h
86024@@ -358,7 +358,7 @@ struct rio_ops {
86025 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
86026 u64 rstart, u32 size, u32 flags);
86027 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
86028-};
86029+} __no_const;
86030
86031 #define RIO_RESOURCE_MEM 0x00000100
86032 #define RIO_RESOURCE_DOORBELL 0x00000200
86033diff --git a/include/linux/rmap.h b/include/linux/rmap.h
86034index be57450..31cf65e 100644
86035--- a/include/linux/rmap.h
86036+++ b/include/linux/rmap.h
86037@@ -144,8 +144,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
86038 void anon_vma_init(void); /* create anon_vma_cachep */
86039 int anon_vma_prepare(struct vm_area_struct *);
86040 void unlink_anon_vmas(struct vm_area_struct *);
86041-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
86042-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
86043+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
86044+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
86045
86046 static inline void anon_vma_merge(struct vm_area_struct *vma,
86047 struct vm_area_struct *next)
86048diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
86049index a964f72..b475afb 100644
86050--- a/include/linux/scatterlist.h
86051+++ b/include/linux/scatterlist.h
86052@@ -1,6 +1,7 @@
86053 #ifndef _LINUX_SCATTERLIST_H
86054 #define _LINUX_SCATTERLIST_H
86055
86056+#include <linux/sched.h>
86057 #include <linux/string.h>
86058 #include <linux/bug.h>
86059 #include <linux/mm.h>
86060@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
86061 #ifdef CONFIG_DEBUG_SG
86062 BUG_ON(!virt_addr_valid(buf));
86063 #endif
86064+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86065+ if (object_starts_on_stack(buf)) {
86066+ void *adjbuf = buf - current->stack + current->lowmem_stack;
86067+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
86068+ } else
86069+#endif
86070 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
86071 }
86072
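[Note: under GRKERNSEC_KSTACKOVERFLOW the task stack is vmalloc-backed, so virt_to_page() on a stack address is invalid; the sg_set_buf() hunk rebases the pointer into the lowmem alias kept in ->lowmem_stack before building the scatterlist entry. The rebasing arithmetic itself, demonstrated on two userspace aliases of equal layout:]

#include <stdio.h>
#include <string.h>

int main(void)
{
	char stack_copy[16] = "hello";
	char lowmem_copy[16];

	memcpy(lowmem_copy, stack_copy, sizeof(stack_copy));

	/* Same offset arithmetic as the hunk: rebase a pointer from one
	 * mapping of a region onto another mapping of equal layout. */
	char *p   = stack_copy + 2;			/* points at 'l' */
	char *adj = p - stack_copy + lowmem_copy;	/* same offset, other alias */

	printf("%c %c\n", *p, *adj);			/* prints: l l */
	return 0;
}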
86073diff --git a/include/linux/sched.h b/include/linux/sched.h
86074index 0376b05..82054c2 100644
86075--- a/include/linux/sched.h
86076+++ b/include/linux/sched.h
86077@@ -131,6 +131,7 @@ struct fs_struct;
86078 struct perf_event_context;
86079 struct blk_plug;
86080 struct filename;
86081+struct linux_binprm;
86082
86083 #define VMACACHE_BITS 2
86084 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
86085@@ -374,7 +375,7 @@ extern char __sched_text_start[], __sched_text_end[];
86086 extern int in_sched_functions(unsigned long addr);
86087
86088 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
86089-extern signed long schedule_timeout(signed long timeout);
86090+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
86091 extern signed long schedule_timeout_interruptible(signed long timeout);
86092 extern signed long schedule_timeout_killable(signed long timeout);
86093 extern signed long schedule_timeout_uninterruptible(signed long timeout);
86094@@ -385,6 +386,19 @@ struct nsproxy;
86095 struct user_namespace;
86096
86097 #ifdef CONFIG_MMU
86098+
86099+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
86100+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
86101+#else
86102+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
86103+{
86104+ return 0;
86105+}
86106+#endif
86107+
86108+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
86109+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
86110+
86111 extern void arch_pick_mmap_layout(struct mm_struct *mm);
86112 extern unsigned long
86113 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
86114@@ -682,6 +696,17 @@ struct signal_struct {
86115 #ifdef CONFIG_TASKSTATS
86116 struct taskstats *stats;
86117 #endif
86118+
86119+#ifdef CONFIG_GRKERNSEC
86120+ u32 curr_ip;
86121+ u32 saved_ip;
86122+ u32 gr_saddr;
86123+ u32 gr_daddr;
86124+ u16 gr_sport;
86125+ u16 gr_dport;
86126+ u8 used_accept:1;
86127+#endif
86128+
86129 #ifdef CONFIG_AUDIT
86130 unsigned audit_tty;
86131 unsigned audit_tty_log_passwd;
86132@@ -708,7 +733,7 @@ struct signal_struct {
86133 struct mutex cred_guard_mutex; /* guard against foreign influences on
86134 * credential calculations
86135 * (notably. ptrace) */
86136-};
86137+} __randomize_layout;
86138
86139 /*
86140 * Bits in flags field of signal_struct.
86141@@ -761,6 +786,14 @@ struct user_struct {
86142 struct key *session_keyring; /* UID's default session keyring */
86143 #endif
86144
86145+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86146+ unsigned char kernel_banned;
86147+#endif
86148+#ifdef CONFIG_GRKERNSEC_BRUTE
86149+ unsigned char suid_banned;
86150+ unsigned long suid_ban_expires;
86151+#endif
86152+
86153 /* Hash table maintenance information */
86154 struct hlist_node uidhash_node;
86155 kuid_t uid;
86156@@ -768,7 +801,7 @@ struct user_struct {
86157 #ifdef CONFIG_PERF_EVENTS
86158 atomic_long_t locked_vm;
86159 #endif
86160-};
86161+} __randomize_layout;
86162
86163 extern int uids_sysfs_init(void);
86164
86165@@ -1224,6 +1257,9 @@ enum perf_event_task_context {
86166 struct task_struct {
86167 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
86168 void *stack;
86169+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86170+ void *lowmem_stack;
86171+#endif
86172 atomic_t usage;
86173 unsigned int flags; /* per process flags, defined below */
86174 unsigned int ptrace;
86175@@ -1349,8 +1385,8 @@ struct task_struct {
86176 struct list_head thread_node;
86177
86178 struct completion *vfork_done; /* for vfork() */
86179- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
86180- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86181+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
86182+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86183
86184 cputime_t utime, stime, utimescaled, stimescaled;
86185 cputime_t gtime;
86186@@ -1375,11 +1411,6 @@ struct task_struct {
86187 struct task_cputime cputime_expires;
86188 struct list_head cpu_timers[3];
86189
86190-/* process credentials */
86191- const struct cred __rcu *real_cred; /* objective and real subjective task
86192- * credentials (COW) */
86193- const struct cred __rcu *cred; /* effective (overridable) subjective task
86194- * credentials (COW) */
86195 char comm[TASK_COMM_LEN]; /* executable name excluding path
86196 - access with [gs]et_task_comm (which lock
86197 it with task_lock())
86198@@ -1396,6 +1427,10 @@ struct task_struct {
86199 #endif
86200 /* CPU-specific state of this task */
86201 struct thread_struct thread;
86202+/* thread_info moved to task_struct */
86203+#ifdef CONFIG_X86
86204+ struct thread_info tinfo;
86205+#endif
86206 /* filesystem information */
86207 struct fs_struct *fs;
86208 /* open file information */
86209@@ -1472,6 +1507,10 @@ struct task_struct {
86210 gfp_t lockdep_reclaim_gfp;
86211 #endif
86212
86213+/* process credentials */
86214+ const struct cred __rcu *real_cred; /* objective and real subjective task
86215+ * credentials (COW) */
86216+
86217 /* journalling filesystem info */
86218 void *journal_info;
86219
86220@@ -1510,6 +1549,10 @@ struct task_struct {
86221 /* cg_list protected by css_set_lock and tsk->alloc_lock */
86222 struct list_head cg_list;
86223 #endif
86224+
86225+ const struct cred __rcu *cred; /* effective (overridable) subjective task
86226+ * credentials (COW) */
86227+
86228 #ifdef CONFIG_FUTEX
86229 struct robust_list_head __user *robust_list;
86230 #ifdef CONFIG_COMPAT
86231@@ -1655,7 +1698,78 @@ struct task_struct {
86232 unsigned int sequential_io;
86233 unsigned int sequential_io_avg;
86234 #endif
86235-};
86236+
86237+#ifdef CONFIG_GRKERNSEC
86238+ /* grsecurity */
86239+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86240+ u64 exec_id;
86241+#endif
86242+#ifdef CONFIG_GRKERNSEC_SETXID
86243+ const struct cred *delayed_cred;
86244+#endif
86245+ struct dentry *gr_chroot_dentry;
86246+ struct acl_subject_label *acl;
86247+ struct acl_subject_label *tmpacl;
86248+ struct acl_role_label *role;
86249+ struct file *exec_file;
86250+ unsigned long brute_expires;
86251+ u16 acl_role_id;
86252+ u8 inherited;
86253+ /* is this the task that authenticated to the special role */
86254+ u8 acl_sp_role;
86255+ u8 is_writable;
86256+ u8 brute;
86257+ u8 gr_is_chrooted;
86258+#endif
86259+
86260+} __randomize_layout;
86261+
86262+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
86263+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
86264+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
86265+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
86266+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
86267+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
86268+
86269+#ifdef CONFIG_PAX_SOFTMODE
86270+extern int pax_softmode;
86271+#endif
86272+
86273+extern int pax_check_flags(unsigned long *);
86274+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
86275+
86276+/* if tsk != current then task_lock must be held on it */
86277+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
86278+static inline unsigned long pax_get_flags(struct task_struct *tsk)
86279+{
86280+ if (likely(tsk->mm))
86281+ return tsk->mm->pax_flags;
86282+ else
86283+ return 0UL;
86284+}
86285+
86286+/* if tsk != current then task_lock must be held on it */
86287+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
86288+{
86289+ if (likely(tsk->mm)) {
86290+ tsk->mm->pax_flags = flags;
86291+ return 0;
86292+ }
86293+ return -EINVAL;
86294+}
86295+#endif
86296+
86297+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
86298+extern void pax_set_initial_flags(struct linux_binprm *bprm);
86299+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
86300+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
86301+#endif
86302+
86303+struct path;
86304+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
86305+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
86306+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
86307+extern void pax_report_refcount_overflow(struct pt_regs *regs);
86308
86309 /* Future-safe accessor for struct task_struct's cpus_allowed. */
86310 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
86311@@ -1737,7 +1851,7 @@ struct pid_namespace;
86312 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
86313 struct pid_namespace *ns);
86314
86315-static inline pid_t task_pid_nr(struct task_struct *tsk)
86316+static inline pid_t task_pid_nr(const struct task_struct *tsk)
86317 {
86318 return tsk->pid;
86319 }
86320@@ -2084,6 +2198,25 @@ extern u64 sched_clock_cpu(int cpu);
86321
86322 extern void sched_clock_init(void);
86323
86324+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86325+static inline void populate_stack(void)
86326+{
86327+ struct task_struct *curtask = current;
86328+ int c;
86329+ int *ptr = curtask->stack;
86330+ int *end = curtask->stack + THREAD_SIZE;
86331+
86332+ while (ptr < end) {
86333+ c = *(volatile int *)ptr;
86334+ ptr += PAGE_SIZE/sizeof(int);
86335+ }
86336+}
86337+#else
86338+static inline void populate_stack(void)
86339+{
86340+}
86341+#endif
86342+
86343 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
86344 static inline void sched_clock_tick(void)
86345 {
86346@@ -2217,7 +2350,9 @@ void yield(void);
86347 extern struct exec_domain default_exec_domain;
86348
86349 union thread_union {
86350+#ifndef CONFIG_X86
86351 struct thread_info thread_info;
86352+#endif
86353 unsigned long stack[THREAD_SIZE/sizeof(long)];
86354 };
86355
86356@@ -2250,6 +2385,7 @@ extern struct pid_namespace init_pid_ns;
86357 */
86358
86359 extern struct task_struct *find_task_by_vpid(pid_t nr);
86360+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
86361 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
86362 struct pid_namespace *ns);
86363
86364@@ -2412,7 +2548,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
86365 extern void exit_itimers(struct signal_struct *);
86366 extern void flush_itimer_signals(void);
86367
86368-extern void do_group_exit(int);
86369+extern __noreturn void do_group_exit(int);
86370
86371 extern int do_execve(struct filename *,
86372 const char __user * const __user *,
86373@@ -2614,9 +2750,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
86374
86375 #endif
86376
86377-static inline int object_is_on_stack(void *obj)
86378+static inline int object_starts_on_stack(const void *obj)
86379 {
86380- void *stack = task_stack_page(current);
86381+ const void *stack = task_stack_page(current);
86382
86383 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
86384 }
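[Note: populate_stack() in the sched.h hunk above touches one word per page so the whole vmalloc-backed kernel stack is faulted in up front, before code runs that cannot tolerate a stack fault. The same access pattern in a runnable userspace sketch:]

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Touch one word per page so every page of the region is faulted in
 * before it is needed; same stride as the PAGE_SIZE walk in the hunk. */
static void populate(void *buf, size_t len)
{
	long page = sysconf(_SC_PAGESIZE);
	volatile char *p = buf;
	size_t off;

	for (off = 0; off < len; off += page)
		(void)p[off];
}

int main(void)
{
	size_t len = 1 << 20;
	char *buf = malloc(len);

	if (!buf)
		return 1;
	populate(buf, len);
	puts("populated");
	free(buf);
	return 0;
}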
86385diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
86386index 596a0e0..bea77ec 100644
86387--- a/include/linux/sched/sysctl.h
86388+++ b/include/linux/sched/sysctl.h
86389@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
86390 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
86391
86392 extern int sysctl_max_map_count;
86393+extern unsigned long sysctl_heap_stack_gap;
86394
86395 extern unsigned int sysctl_sched_latency;
86396 extern unsigned int sysctl_sched_min_granularity;
86397diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
86398index 4054b09..6f19cfd 100644
86399--- a/include/linux/seccomp.h
86400+++ b/include/linux/seccomp.h
86401@@ -76,6 +76,7 @@ static inline int seccomp_mode(struct seccomp *s)
86402 #ifdef CONFIG_SECCOMP_FILTER
86403 extern void put_seccomp_filter(struct task_struct *tsk);
86404 extern void get_seccomp_filter(struct task_struct *tsk);
86405+extern u32 seccomp_bpf_load(int off);
86406 #else /* CONFIG_SECCOMP_FILTER */
86407 static inline void put_seccomp_filter(struct task_struct *tsk)
86408 {
86409diff --git a/include/linux/security.h b/include/linux/security.h
86410index 9c6b972..7e7c704 100644
86411--- a/include/linux/security.h
86412+++ b/include/linux/security.h
86413@@ -27,6 +27,7 @@
86414 #include <linux/slab.h>
86415 #include <linux/err.h>
86416 #include <linux/string.h>
86417+#include <linux/grsecurity.h>
86418
86419 struct linux_binprm;
86420 struct cred;
86421@@ -116,8 +117,6 @@ struct seq_file;
86422
86423 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
86424
86425-void reset_security_ops(void);
86426-
86427 #ifdef CONFIG_MMU
86428 extern unsigned long mmap_min_addr;
86429 extern unsigned long dac_mmap_min_addr;
86430@@ -1719,7 +1718,7 @@ struct security_operations {
86431 struct audit_context *actx);
86432 void (*audit_rule_free) (void *lsmrule);
86433 #endif /* CONFIG_AUDIT */
86434-};
86435+} __randomize_layout;
86436
86437 /* prototypes */
86438 extern int security_init(void);
86439diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
86440index dc368b8..e895209 100644
86441--- a/include/linux/semaphore.h
86442+++ b/include/linux/semaphore.h
86443@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
86444 }
86445
86446 extern void down(struct semaphore *sem);
86447-extern int __must_check down_interruptible(struct semaphore *sem);
86448+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
86449 extern int __must_check down_killable(struct semaphore *sem);
86450 extern int __must_check down_trylock(struct semaphore *sem);
86451 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
86452diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
86453index 52e0097..383f21d 100644
86454--- a/include/linux/seq_file.h
86455+++ b/include/linux/seq_file.h
86456@@ -27,6 +27,9 @@ struct seq_file {
86457 struct mutex lock;
86458 const struct seq_operations *op;
86459 int poll_event;
86460+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86461+ u64 exec_id;
86462+#endif
86463 #ifdef CONFIG_USER_NS
86464 struct user_namespace *user_ns;
86465 #endif
86466@@ -39,6 +42,7 @@ struct seq_operations {
86467 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
86468 int (*show) (struct seq_file *m, void *v);
86469 };
86470+typedef struct seq_operations __no_const seq_operations_no_const;
86471
86472 #define SEQ_SKIP 1
86473
86474@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
86475
86476 char *mangle_path(char *s, const char *p, const char *esc);
86477 int seq_open(struct file *, const struct seq_operations *);
86478+int seq_open_restrict(struct file *, const struct seq_operations *);
86479 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
86480 loff_t seq_lseek(struct file *, loff_t, int);
86481 int seq_release(struct inode *, struct file *);
86482@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
86483 }
86484
86485 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
86486+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
86487 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
86488 int single_release(struct inode *, struct file *);
86489 void *__seq_open_private(struct file *, const struct seq_operations *, int);
86490diff --git a/include/linux/shm.h b/include/linux/shm.h
86491index 57d7770..0936af6 100644
86492--- a/include/linux/shm.h
86493+++ b/include/linux/shm.h
86494@@ -20,6 +20,10 @@ struct shmid_kernel /* private to the kernel */
86495
86496 /* The task created the shm object. NULL if the task is dead. */
86497 struct task_struct *shm_creator;
86498+#ifdef CONFIG_GRKERNSEC
86499+ time_t shm_createtime;
86500+ pid_t shm_lapid;
86501+#endif
86502 };
86503
86504 /* shm_mode upper byte flags */
86505diff --git a/include/linux/signal.h b/include/linux/signal.h
86506index c9e6536..923b302 100644
86507--- a/include/linux/signal.h
86508+++ b/include/linux/signal.h
86509@@ -293,7 +293,7 @@ static inline void allow_signal(int sig)
86510 * know it'll be handled, so that they don't get converted to
86511 * SIGKILL or just silently dropped.
86512 */
86513- kernel_sigaction(sig, (__force __sighandler_t)2);
86514+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
86515 }
86516
86517 static inline void disallow_signal(int sig)
86518diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
86519index ec89301..4fd29a6 100644
86520--- a/include/linux/skbuff.h
86521+++ b/include/linux/skbuff.h
86522@@ -725,7 +725,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
86523 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
86524 int node);
86525 struct sk_buff *build_skb(void *data, unsigned int frag_size);
86526-static inline struct sk_buff *alloc_skb(unsigned int size,
86527+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
86528 gfp_t priority)
86529 {
86530 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
86531@@ -1839,7 +1839,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
86532 return skb->inner_transport_header - skb->inner_network_header;
86533 }
86534
86535-static inline int skb_network_offset(const struct sk_buff *skb)
86536+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
86537 {
86538 return skb_network_header(skb) - skb->data;
86539 }
86540@@ -1911,7 +1911,7 @@ static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
86541 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
86542 */
86543 #ifndef NET_SKB_PAD
86544-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
86545+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
86546 #endif
86547
86548 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
86549@@ -2518,7 +2518,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
86550 int *err);
86551 unsigned int datagram_poll(struct file *file, struct socket *sock,
86552 struct poll_table_struct *wait);
86553-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
86554+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
86555 struct iovec *to, int size);
86556 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
86557 struct iovec *iov);
86558@@ -2664,6 +2664,8 @@ static inline ktime_t net_invalid_timestamp(void)
86559 return ktime_set(0, 0);
86560 }
86561
86562+void skb_timestamping_init(void);
86563+
86564 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
86565
86566 void skb_clone_tx_timestamp(struct sk_buff *skb);
86567@@ -2907,6 +2909,9 @@ static inline void nf_reset(struct sk_buff *skb)
86568 nf_bridge_put(skb->nf_bridge);
86569 skb->nf_bridge = NULL;
86570 #endif
86571+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
86572+ skb->nf_trace = 0;
86573+#endif
86574 }
86575
86576 static inline void nf_reset_trace(struct sk_buff *skb)
86577diff --git a/include/linux/slab.h b/include/linux/slab.h
86578index 1d9abb7..b1e8b10 100644
86579--- a/include/linux/slab.h
86580+++ b/include/linux/slab.h
86581@@ -14,15 +14,29 @@
86582 #include <linux/gfp.h>
86583 #include <linux/types.h>
86584 #include <linux/workqueue.h>
86585-
86586+#include <linux/err.h>
86587
86588 /*
86589 * Flags to pass to kmem_cache_create().
86590 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
86591 */
86592 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
86593+
86594+#ifdef CONFIG_PAX_USERCOPY_SLABS
86595+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
86596+#else
86597+#define SLAB_USERCOPY 0x00000000UL
86598+#endif
86599+
86600 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
86601 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
86602+
86603+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86604+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
86605+#else
86606+#define SLAB_NO_SANITIZE 0x00000000UL
86607+#endif
86608+
86609 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
86610 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
86611 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
86612@@ -98,10 +112,13 @@
86613 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
86614 * Both make kfree a no-op.
86615 */
86616-#define ZERO_SIZE_PTR ((void *)16)
86617+#define ZERO_SIZE_PTR \
86618+({ \
86619+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
86620+ (void *)(-MAX_ERRNO-1L); \
86621+})
86622
86623-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
86624- (unsigned long)ZERO_SIZE_PTR)
86625+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
86626
86627 #include <linux/kmemleak.h>
86628
86629@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
86630 void kfree(const void *);
86631 void kzfree(const void *);
86632 size_t ksize(const void *);
86633+const char *check_heap_object(const void *ptr, unsigned long n);
86634+bool is_usercopy_object(const void *ptr);
86635
86636 /*
86637 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
86638@@ -176,7 +195,7 @@ struct kmem_cache {
86639 unsigned int align; /* Alignment as calculated */
86640 unsigned long flags; /* Active flags on the slab */
86641 const char *name; /* Slab name for sysfs */
86642- int refcount; /* Use counter */
86643+ atomic_t refcount; /* Use counter */
86644 void (*ctor)(void *); /* Called on object slot creation */
86645 struct list_head list; /* List of all slab caches on the system */
86646 };
86647@@ -261,6 +280,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
86648 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86649 #endif
86650
86651+#ifdef CONFIG_PAX_USERCOPY_SLABS
86652+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
86653+#endif
86654+
86655 /*
86656 * Figure out which kmalloc slab an allocation of a certain size
86657 * belongs to.
86658@@ -269,7 +292,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86659 * 2 = 120 .. 192 bytes
86660 * n = 2^(n-1) .. 2^n -1
86661 */
86662-static __always_inline int kmalloc_index(size_t size)
86663+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
86664 {
86665 if (!size)
86666 return 0;
86667@@ -312,11 +335,11 @@ static __always_inline int kmalloc_index(size_t size)
86668 }
86669 #endif /* !CONFIG_SLOB */
86670
86671-void *__kmalloc(size_t size, gfp_t flags);
86672+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
86673 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
86674
86675 #ifdef CONFIG_NUMA
86676-void *__kmalloc_node(size_t size, gfp_t flags, int node);
86677+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
86678 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
86679 #else
86680 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
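[Note: the slab.h hunk moves ZERO_SIZE_PTR to just below the ERR_PTR range instead of 16, and the rewritten ZERO_OR_NULL_PTR() folds the NULL and ZERO_SIZE_PTR tests into one unsigned comparison, which also happens to cover the ERR_PTR range above it. A runnable LP64 userspace check of the arithmetic, with the MAX_ERRNO value assumed from include/linux/err.h:]

#include <stdio.h>

#define MAX_ERRNO	4095
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))

/* Subtracting 1 wraps NULL (0) to ULONG_MAX, putting it above the
 * threshold; ZERO_SIZE_PTR lands exactly on it; ordinary heap and
 * stack pointers fall far below. */
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int obj;

	printf("NULL          -> %d\n", ZERO_OR_NULL_PTR(NULL));		/* 1 */
	printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));	/* 1 */
	printf("ERR_PTR(-22)  -> %d\n", ZERO_OR_NULL_PTR((void *)-22L));	/* 1 */
	printf("&obj          -> %d\n", ZERO_OR_NULL_PTR(&obj));		/* 0 */
	return 0;
}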
86681diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
86682index 8235dfb..47ce586 100644
86683--- a/include/linux/slab_def.h
86684+++ b/include/linux/slab_def.h
86685@@ -38,7 +38,7 @@ struct kmem_cache {
86686 /* 4) cache creation/removal */
86687 const char *name;
86688 struct list_head list;
86689- int refcount;
86690+ atomic_t refcount;
86691 int object_size;
86692 int align;
86693
86694@@ -54,10 +54,14 @@ struct kmem_cache {
86695 unsigned long node_allocs;
86696 unsigned long node_frees;
86697 unsigned long node_overflow;
86698- atomic_t allochit;
86699- atomic_t allocmiss;
86700- atomic_t freehit;
86701- atomic_t freemiss;
86702+ atomic_unchecked_t allochit;
86703+ atomic_unchecked_t allocmiss;
86704+ atomic_unchecked_t freehit;
86705+ atomic_unchecked_t freemiss;
86706+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86707+ atomic_unchecked_t sanitized;
86708+ atomic_unchecked_t not_sanitized;
86709+#endif
86710
86711 /*
86712 * If debugging is enabled, then the allocator can add additional
86713diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
86714index d82abd4..408c3a0 100644
86715--- a/include/linux/slub_def.h
86716+++ b/include/linux/slub_def.h
86717@@ -74,7 +74,7 @@ struct kmem_cache {
86718 struct kmem_cache_order_objects max;
86719 struct kmem_cache_order_objects min;
86720 gfp_t allocflags; /* gfp flags to use on each alloc */
86721- int refcount; /* Refcount for slab cache destroy */
86722+ atomic_t refcount; /* Refcount for slab cache destroy */
86723 void (*ctor)(void *);
86724 int inuse; /* Offset to metadata */
86725 int align; /* Alignment */
86726diff --git a/include/linux/smp.h b/include/linux/smp.h
86727index 34347f2..8739978 100644
86728--- a/include/linux/smp.h
86729+++ b/include/linux/smp.h
86730@@ -174,7 +174,9 @@ static inline void kick_all_cpus_sync(void) { }
86731 #endif
86732
86733 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
86734+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
86735 #define put_cpu() preempt_enable()
86736+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
86737
86738 /*
86739 * Callback to arch code if there's nosmp or maxcpus=0 on the
86740diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
86741index 46cca4c..3323536 100644
86742--- a/include/linux/sock_diag.h
86743+++ b/include/linux/sock_diag.h
86744@@ -11,7 +11,7 @@ struct sock;
86745 struct sock_diag_handler {
86746 __u8 family;
86747 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
86748-};
86749+} __do_const;
86750
86751 int sock_diag_register(const struct sock_diag_handler *h);
86752 void sock_diag_unregister(const struct sock_diag_handler *h);
86753diff --git a/include/linux/sonet.h b/include/linux/sonet.h
86754index 680f9a3..f13aeb0 100644
86755--- a/include/linux/sonet.h
86756+++ b/include/linux/sonet.h
86757@@ -7,7 +7,7 @@
86758 #include <uapi/linux/sonet.h>
86759
86760 struct k_sonet_stats {
86761-#define __HANDLE_ITEM(i) atomic_t i
86762+#define __HANDLE_ITEM(i) atomic_unchecked_t i
86763 __SONET_ITEMS
86764 #undef __HANDLE_ITEM
86765 };
86766diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
86767index 07d8e53..dc934c9 100644
86768--- a/include/linux/sunrpc/addr.h
86769+++ b/include/linux/sunrpc/addr.h
86770@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
86771 {
86772 switch (sap->sa_family) {
86773 case AF_INET:
86774- return ntohs(((struct sockaddr_in *)sap)->sin_port);
86775+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
86776 case AF_INET6:
86777- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
86778+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
86779 }
86780 return 0;
86781 }
86782@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
86783 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
86784 const struct sockaddr *src)
86785 {
86786- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
86787+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
86788 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
86789
86790 dsin->sin_family = ssin->sin_family;
86791@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
86792 if (sa->sa_family != AF_INET6)
86793 return 0;
86794
86795- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
86796+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
86797 }
86798
86799 #endif /* _LINUX_SUNRPC_ADDR_H */
86800diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
86801index 70736b9..37f33db 100644
86802--- a/include/linux/sunrpc/clnt.h
86803+++ b/include/linux/sunrpc/clnt.h
86804@@ -97,7 +97,7 @@ struct rpc_procinfo {
86805 unsigned int p_timer; /* Which RTT timer to use */
86806 u32 p_statidx; /* Which procedure to account */
86807 const char * p_name; /* name of procedure */
86808-};
86809+} __do_const;
86810
86811 #ifdef __KERNEL__
86812
86813diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
86814index 1bc7cd0..7912dc2 100644
86815--- a/include/linux/sunrpc/svc.h
86816+++ b/include/linux/sunrpc/svc.h
86817@@ -417,7 +417,7 @@ struct svc_procedure {
86818 unsigned int pc_count; /* call count */
86819 unsigned int pc_cachetype; /* cache info (NFS) */
86820 unsigned int pc_xdrressize; /* maximum size of XDR reply */
86821-};
86822+} __do_const;
86823
86824 /*
86825 * Function prototypes.
86826diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
86827index 5cf99a0..c0a1b98 100644
86828--- a/include/linux/sunrpc/svc_rdma.h
86829+++ b/include/linux/sunrpc/svc_rdma.h
86830@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
86831 extern unsigned int svcrdma_max_requests;
86832 extern unsigned int svcrdma_max_req_size;
86833
86834-extern atomic_t rdma_stat_recv;
86835-extern atomic_t rdma_stat_read;
86836-extern atomic_t rdma_stat_write;
86837-extern atomic_t rdma_stat_sq_starve;
86838-extern atomic_t rdma_stat_rq_starve;
86839-extern atomic_t rdma_stat_rq_poll;
86840-extern atomic_t rdma_stat_rq_prod;
86841-extern atomic_t rdma_stat_sq_poll;
86842-extern atomic_t rdma_stat_sq_prod;
86843+extern atomic_unchecked_t rdma_stat_recv;
86844+extern atomic_unchecked_t rdma_stat_read;
86845+extern atomic_unchecked_t rdma_stat_write;
86846+extern atomic_unchecked_t rdma_stat_sq_starve;
86847+extern atomic_unchecked_t rdma_stat_rq_starve;
86848+extern atomic_unchecked_t rdma_stat_rq_poll;
86849+extern atomic_unchecked_t rdma_stat_rq_prod;
86850+extern atomic_unchecked_t rdma_stat_sq_poll;
86851+extern atomic_unchecked_t rdma_stat_sq_prod;
86852
86853 #define RPCRDMA_VERSION 1
86854
86855diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
86856index 8d71d65..f79586e 100644
86857--- a/include/linux/sunrpc/svcauth.h
86858+++ b/include/linux/sunrpc/svcauth.h
86859@@ -120,7 +120,7 @@ struct auth_ops {
86860 int (*release)(struct svc_rqst *rq);
86861 void (*domain_release)(struct auth_domain *);
86862 int (*set_client)(struct svc_rqst *rq);
86863-};
86864+} __do_const;
86865
86866 #define SVC_GARBAGE 1
86867 #define SVC_SYSERR 2
86868diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
86869index e7a018e..49f8b17 100644
86870--- a/include/linux/swiotlb.h
86871+++ b/include/linux/swiotlb.h
86872@@ -60,7 +60,8 @@ extern void
86873
86874 extern void
86875 swiotlb_free_coherent(struct device *hwdev, size_t size,
86876- void *vaddr, dma_addr_t dma_handle);
86877+ void *vaddr, dma_addr_t dma_handle,
86878+ struct dma_attrs *attrs);
86879
86880 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
86881 unsigned long offset, size_t size,
86882diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
86883index b0881a0..559a440 100644
86884--- a/include/linux/syscalls.h
86885+++ b/include/linux/syscalls.h
86886@@ -98,10 +98,16 @@ struct sigaltstack;
86887 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
86888
86889 #define __SC_DECL(t, a) t a
86890+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
86891 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
86892 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
86893 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
86894-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
86895+#define __SC_LONG(t, a) __typeof( \
86896+ __builtin_choose_expr( \
86897+ sizeof(t) > sizeof(int), \
86898+ (t) 0, \
86899+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
86900+ )) a
86901 #define __SC_CAST(t, a) (t) a
86902 #define __SC_ARGS(t, a) a
86903 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
86904@@ -379,11 +385,11 @@ asmlinkage long sys_sync(void);
86905 asmlinkage long sys_fsync(unsigned int fd);
86906 asmlinkage long sys_fdatasync(unsigned int fd);
86907 asmlinkage long sys_bdflush(int func, long data);
86908-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
86909- char __user *type, unsigned long flags,
86910+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
86911+ const char __user *type, unsigned long flags,
86912 void __user *data);
86913-asmlinkage long sys_umount(char __user *name, int flags);
86914-asmlinkage long sys_oldumount(char __user *name);
86915+asmlinkage long sys_umount(const char __user *name, int flags);
86916+asmlinkage long sys_oldumount(const char __user *name);
86917 asmlinkage long sys_truncate(const char __user *path, long length);
86918 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
86919 asmlinkage long sys_stat(const char __user *filename,
86920@@ -595,7 +601,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
86921 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
86922 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
86923 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
86924- struct sockaddr __user *, int);
86925+ struct sockaddr __user *, int) __intentional_overflow(0);
86926 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
86927 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
86928 unsigned int vlen, unsigned flags);
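[Note: the reworked __SC_LONG keeps types wider than int as-is and widens unsigned sub-long types through unsigned long, where the old macro widened everything sub-long-long through signed long and thus sign-extended large unsigned int arguments. The practical difference on an LP64 target, in a runnable sketch:]

#include <stdio.h>

int main(void)
{
	unsigned int len = 0x80000000u;

	/* Old behaviour: every sub-long-long type widened through long. */
	long as_signed = (long)(int)len;

	/* Patched behaviour: unsigned sub-long types widen through
	 * unsigned long instead, preserving the caller's value. */
	unsigned long as_unsigned = (unsigned long)len;

	printf("widened through long:          %ld\n", as_signed);   /* -2147483648 */
	printf("widened through unsigned long: %lu\n", as_unsigned); /*  2147483648 */
	return 0;
}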
86929diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
86930index 27b3b0b..e093dd9 100644
86931--- a/include/linux/syscore_ops.h
86932+++ b/include/linux/syscore_ops.h
86933@@ -16,7 +16,7 @@ struct syscore_ops {
86934 int (*suspend)(void);
86935 void (*resume)(void);
86936 void (*shutdown)(void);
86937-};
86938+} __do_const;
86939
86940 extern void register_syscore_ops(struct syscore_ops *ops);
86941 extern void unregister_syscore_ops(struct syscore_ops *ops);
86942diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
86943index 14a8ff2..fa95f3a 100644
86944--- a/include/linux/sysctl.h
86945+++ b/include/linux/sysctl.h
86946@@ -34,13 +34,13 @@ struct ctl_table_root;
86947 struct ctl_table_header;
86948 struct ctl_dir;
86949
86950-typedef struct ctl_table ctl_table;
86951-
86952 typedef int proc_handler (struct ctl_table *ctl, int write,
86953 void __user *buffer, size_t *lenp, loff_t *ppos);
86954
86955 extern int proc_dostring(struct ctl_table *, int,
86956 void __user *, size_t *, loff_t *);
86957+extern int proc_dostring_modpriv(struct ctl_table *, int,
86958+ void __user *, size_t *, loff_t *);
86959 extern int proc_dointvec(struct ctl_table *, int,
86960 void __user *, size_t *, loff_t *);
86961 extern int proc_dointvec_minmax(struct ctl_table *, int,
86962@@ -115,7 +115,9 @@ struct ctl_table
86963 struct ctl_table_poll *poll;
86964 void *extra1;
86965 void *extra2;
86966-};
86967+} __do_const __randomize_layout;
86968+typedef struct ctl_table __no_const ctl_table_no_const;
86969+typedef struct ctl_table ctl_table;
86970
86971 struct ctl_node {
86972 struct rb_node node;
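[Note: __do_const lets the constify plugin treat every static ctl_table as read-only, and the ctl_table_no_const alias is the escape hatch for tables genuinely built at runtime. A kernel-style sketch of the assumed usage pattern, not a quote from this patch:]

/* Duplicate a constified template through the __no_const alias before
 * patching per-instance fields at runtime. */
static const struct ctl_table template[] = {
	{ .procname = "example", .maxlen = sizeof(int), .mode = 0644 },
	{ }
};

static ctl_table_no_const *make_table(void *data)
{
	ctl_table_no_const *tbl = kmemdup(template, sizeof(template), GFP_KERNEL);

	if (tbl)
		tbl[0].data = data;	/* legal: the alias is not constified */
	return tbl;
}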
86973diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
86974index f97d0db..c1187dc 100644
86975--- a/include/linux/sysfs.h
86976+++ b/include/linux/sysfs.h
86977@@ -34,7 +34,8 @@ struct attribute {
86978 struct lock_class_key *key;
86979 struct lock_class_key skey;
86980 #endif
86981-};
86982+} __do_const;
86983+typedef struct attribute __no_const attribute_no_const;
86984
86985 /**
86986 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
86987@@ -63,7 +64,8 @@ struct attribute_group {
86988 struct attribute *, int);
86989 struct attribute **attrs;
86990 struct bin_attribute **bin_attrs;
86991-};
86992+} __do_const;
86993+typedef struct attribute_group __no_const attribute_group_no_const;
86994
86995 /**
86996 * Use these macros to make defining attributes easier. See include/linux/device.h
86997@@ -128,7 +130,8 @@ struct bin_attribute {
86998 char *, loff_t, size_t);
86999 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
87000 struct vm_area_struct *vma);
87001-};
87002+} __do_const;
87003+typedef struct bin_attribute __no_const bin_attribute_no_const;
87004
87005 /**
87006 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
87007diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
87008index 387fa7d..3fcde6b 100644
87009--- a/include/linux/sysrq.h
87010+++ b/include/linux/sysrq.h
87011@@ -16,6 +16,7 @@
87012
87013 #include <linux/errno.h>
87014 #include <linux/types.h>
87015+#include <linux/compiler.h>
87016
87017 /* Possible values of bitmask for enabling sysrq functions */
87018 /* 0x0001 is reserved for enable everything */
87019@@ -33,7 +34,7 @@ struct sysrq_key_op {
87020 char *help_msg;
87021 char *action_msg;
87022 int enable_mask;
87023-};
87024+} __do_const;
87025
87026 #ifdef CONFIG_MAGIC_SYSRQ
87027
87028diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
87029index ff307b5..f1a4468 100644
87030--- a/include/linux/thread_info.h
87031+++ b/include/linux/thread_info.h
87032@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
87033 #error "no set_restore_sigmask() provided and default one won't work"
87034 #endif
87035
87036+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
87037+
87038+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
87039+{
87040+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
87041+}
87042+
87043 #endif /* __KERNEL__ */
87044
87045 #endif /* _LINUX_THREAD_INFO_H */
87046diff --git a/include/linux/tty.h b/include/linux/tty.h
87047index 1c3316a..ae83b9f 100644
87048--- a/include/linux/tty.h
87049+++ b/include/linux/tty.h
87050@@ -202,7 +202,7 @@ struct tty_port {
87051 const struct tty_port_operations *ops; /* Port operations */
87052 spinlock_t lock; /* Lock protecting tty field */
87053 int blocked_open; /* Waiting to open */
87054- int count; /* Usage count */
87055+ atomic_t count; /* Usage count */
87056 wait_queue_head_t open_wait; /* Open waiters */
87057 wait_queue_head_t close_wait; /* Close waiters */
87058 wait_queue_head_t delta_msr_wait; /* Modem status change */
87059@@ -284,7 +284,7 @@ struct tty_struct {
87060 /* If the tty has a pending do_SAK, queue it here - akpm */
87061 struct work_struct SAK_work;
87062 struct tty_port *port;
87063-};
87064+} __randomize_layout;
87065
87066 /* Each of a tty's open files has private_data pointing to tty_file_private */
87067 struct tty_file_private {
87068@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
87069 struct tty_struct *tty, struct file *filp);
87070 static inline int tty_port_users(struct tty_port *port)
87071 {
87072- return port->count + port->blocked_open;
87073+ return atomic_read(&port->count) + port->blocked_open;
87074 }
87075
87076 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
87077diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
87078index 756a609..89db85e 100644
87079--- a/include/linux/tty_driver.h
87080+++ b/include/linux/tty_driver.h
87081@@ -285,7 +285,7 @@ struct tty_operations {
87082 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
87083 #endif
87084 const struct file_operations *proc_fops;
87085-};
87086+} __do_const __randomize_layout;
87087
87088 struct tty_driver {
87089 int magic; /* magic number for this structure */
87090@@ -319,7 +319,7 @@ struct tty_driver {
87091
87092 const struct tty_operations *ops;
87093 struct list_head tty_drivers;
87094-};
87095+} __randomize_layout;
87096
87097 extern struct list_head tty_drivers;
87098
87099diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
87100index 00c9d68..bc0188b 100644
87101--- a/include/linux/tty_ldisc.h
87102+++ b/include/linux/tty_ldisc.h
87103@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
87104
87105 struct module *owner;
87106
87107- int refcount;
87108+ atomic_t refcount;
87109 };
87110
87111 struct tty_ldisc {
87112diff --git a/include/linux/types.h b/include/linux/types.h
87113index a0bb704..f511c77 100644
87114--- a/include/linux/types.h
87115+++ b/include/linux/types.h
87116@@ -177,10 +177,26 @@ typedef struct {
87117 int counter;
87118 } atomic_t;
87119
87120+#ifdef CONFIG_PAX_REFCOUNT
87121+typedef struct {
87122+ int counter;
87123+} atomic_unchecked_t;
87124+#else
87125+typedef atomic_t atomic_unchecked_t;
87126+#endif
87127+
87128 #ifdef CONFIG_64BIT
87129 typedef struct {
87130 long counter;
87131 } atomic64_t;
87132+
87133+#ifdef CONFIG_PAX_REFCOUNT
87134+typedef struct {
87135+ long counter;
87136+} atomic64_unchecked_t;
87137+#else
87138+typedef atomic64_t atomic64_unchecked_t;
87139+#endif
87140 #endif
87141
87142 struct list_head {
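[Note: with PAX_REFCOUNT, atomic_t arithmetic is overflow-checked, so a wrap is treated as a refcount bug and trapped; atomic_unchecked_t opts a counter out of that check. The intended split, as a kernel-style sketch; the *_unchecked helpers are supplied by this patch's per-arch atomic changes:]

static atomic_t           obj_refs;	/* must never wrap: checked */
static atomic_unchecked_t rx_packets;	/* free-running statistic: may wrap */

static void rx_one(void)
{
	atomic_inc(&obj_refs);			/* traps on overflow */
	atomic_inc_unchecked(&rx_packets);	/* wraps silently, by design */
}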
87143diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
87144index ecd3319..8a36ded 100644
87145--- a/include/linux/uaccess.h
87146+++ b/include/linux/uaccess.h
87147@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
87148 long ret; \
87149 mm_segment_t old_fs = get_fs(); \
87150 \
87151- set_fs(KERNEL_DS); \
87152 pagefault_disable(); \
87153- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
87154- pagefault_enable(); \
87155+ set_fs(KERNEL_DS); \
87156+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
87157 set_fs(old_fs); \
87158+ pagefault_enable(); \
87159 ret; \
87160 })
87161
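/* Note on the uaccess.h hunk above: the reordering nests the raised
 * address limit strictly inside the pagefault-disabled region. The
 * resulting order of the probe macro is:
 *
 *	pagefault_disable();
 *	set_fs(KERNEL_DS);
 *	ret = __copy_from_user_inatomic(...);
 *	set_fs(old_fs);
 *	pagefault_enable();
 *
 * so the KERNEL_DS window can no longer straddle a preemption point or
 * outlive the fault-disabled section. */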
87162diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
87163index 2d1f9b6..d7a9fce 100644
87164--- a/include/linux/uidgid.h
87165+++ b/include/linux/uidgid.h
87166@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
87167
87168 #endif /* CONFIG_USER_NS */
87169
87170+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
87171+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
87172+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
87173+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
87174+
87175 #endif /* _LINUX_UIDGID_H */
87176diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
87177index 99c1b4d..562e6f3 100644
87178--- a/include/linux/unaligned/access_ok.h
87179+++ b/include/linux/unaligned/access_ok.h
87180@@ -4,34 +4,34 @@
87181 #include <linux/kernel.h>
87182 #include <asm/byteorder.h>
87183
87184-static inline u16 get_unaligned_le16(const void *p)
87185+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
87186 {
87187- return le16_to_cpup((__le16 *)p);
87188+ return le16_to_cpup((const __le16 *)p);
87189 }
87190
87191-static inline u32 get_unaligned_le32(const void *p)
87192+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
87193 {
87194- return le32_to_cpup((__le32 *)p);
87195+ return le32_to_cpup((const __le32 *)p);
87196 }
87197
87198-static inline u64 get_unaligned_le64(const void *p)
87199+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
87200 {
87201- return le64_to_cpup((__le64 *)p);
87202+ return le64_to_cpup((const __le64 *)p);
87203 }
87204
87205-static inline u16 get_unaligned_be16(const void *p)
87206+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
87207 {
87208- return be16_to_cpup((__be16 *)p);
87209+ return be16_to_cpup((const __be16 *)p);
87210 }
87211
87212-static inline u32 get_unaligned_be32(const void *p)
87213+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
87214 {
87215- return be32_to_cpup((__be32 *)p);
87216+ return be32_to_cpup((const __be32 *)p);
87217 }
87218
87219-static inline u64 get_unaligned_be64(const void *p)
87220+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
87221 {
87222- return be64_to_cpup((__be64 *)p);
87223+ return be64_to_cpup((const __be64 *)p);
87224 }
87225
87226 static inline void put_unaligned_le16(u16 val, void *p)
87227diff --git a/include/linux/usb.h b/include/linux/usb.h
87228index d2465bc..5256de4 100644
87229--- a/include/linux/usb.h
87230+++ b/include/linux/usb.h
87231@@ -571,7 +571,7 @@ struct usb_device {
87232 int maxchild;
87233
87234 u32 quirks;
87235- atomic_t urbnum;
87236+ atomic_unchecked_t urbnum;
87237
87238 unsigned long active_duration;
87239
87240@@ -1655,7 +1655,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
87241
87242 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
87243 __u8 request, __u8 requesttype, __u16 value, __u16 index,
87244- void *data, __u16 size, int timeout);
87245+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
87246 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
87247 void *data, int len, int *actual_length, int timeout);
87248 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
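Under CONFIG_PAX_REFCOUNT, atomic_t operations trap on overflow to stop reference-count wraparound exploits. urbnum is a pure statistic where wraparound is harmless, so it is converted to atomic_unchecked_t, whose operations stay uninstrumented. The pattern, sketched:

    /* illustration of the two counter flavours under PAX_REFCOUNT */
    atomic_t refs = ATOMIC_INIT(1);            /* overflow is detected */
    atomic_unchecked_t stats = ATOMIC_INIT(0); /* may wrap silently */

    atomic_inc(&refs);             /* instrumented */
    atomic_inc_unchecked(&stats);  /* not instrumented */

usb_control_msg() additionally gets __intentional_overflow(-1) so the size_overflow plugin accepts wrapping arithmetic on its int return value.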
87249diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
87250index e452ba6..78f8e80 100644
87251--- a/include/linux/usb/renesas_usbhs.h
87252+++ b/include/linux/usb/renesas_usbhs.h
87253@@ -39,7 +39,7 @@ enum {
87254 */
87255 struct renesas_usbhs_driver_callback {
87256 int (*notify_hotplug)(struct platform_device *pdev);
87257-};
87258+} __no_const;
87259
87260 /*
87261 * callback functions for platform
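__no_const is the opt-out marker for the PaX constify plugin: structures made up purely of function pointers are normally forced const (read-only after build), but this callback struct is filled in at runtime by platform code, so it must stay writable. The distinction in miniature:

    struct runtime_ops {
            int (*notify)(struct platform_device *pdev);
    } __no_const;        /* assigned at probe time, stays writable */

Its counterpart __do_const, which forces constification, appears on many ops tables later in this patch.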
87262diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
87263index 4836ba3..603f6ee 100644
87264--- a/include/linux/user_namespace.h
87265+++ b/include/linux/user_namespace.h
87266@@ -33,7 +33,7 @@ struct user_namespace {
87267 struct key *persistent_keyring_register;
87268 struct rw_semaphore persistent_keyring_register_sem;
87269 #endif
87270-};
87271+} __randomize_layout;
87272
87273 extern struct user_namespace init_user_ns;
87274
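__randomize_layout feeds the RANDSTRUCT GCC plugin, which shuffles the member order of tagged structures at build time using a per-build seed, so exploits can no longer rely on fixed offsets into security-sensitive objects. For illustration:

    struct sensitive {
            kuid_t owner;
            void *security;
            struct list_head link;
    } __randomize_layout;    /* member offsets differ between builds */

Only code built with the same seed (i.e. the same kernel build) agrees on the layout, which is also why the vermagic change below embeds a hash of that seed.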
87275diff --git a/include/linux/utsname.h b/include/linux/utsname.h
87276index 239e277..22a5cf5 100644
87277--- a/include/linux/utsname.h
87278+++ b/include/linux/utsname.h
87279@@ -24,7 +24,7 @@ struct uts_namespace {
87280 struct new_utsname name;
87281 struct user_namespace *user_ns;
87282 unsigned int proc_inum;
87283-};
87284+} __randomize_layout;
87285 extern struct uts_namespace init_uts_ns;
87286
87287 #ifdef CONFIG_UTS_NS
87288diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
87289index 6f8fbcf..4efc177 100644
87290--- a/include/linux/vermagic.h
87291+++ b/include/linux/vermagic.h
87292@@ -25,9 +25,42 @@
87293 #define MODULE_ARCH_VERMAGIC ""
87294 #endif
87295
87296+#ifdef CONFIG_PAX_REFCOUNT
87297+#define MODULE_PAX_REFCOUNT "REFCOUNT "
87298+#else
87299+#define MODULE_PAX_REFCOUNT ""
87300+#endif
87301+
87302+#ifdef CONSTIFY_PLUGIN
87303+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
87304+#else
87305+#define MODULE_CONSTIFY_PLUGIN ""
87306+#endif
87307+
87308+#ifdef STACKLEAK_PLUGIN
87309+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
87310+#else
87311+#define MODULE_STACKLEAK_PLUGIN ""
87312+#endif
87313+
87314+#ifdef RANDSTRUCT_PLUGIN
87315+#include <generated/randomize_layout_hash.h>
87316+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
87317+#else
87318+#define MODULE_RANDSTRUCT_PLUGIN
87319+#endif
87320+
87321+#ifdef CONFIG_GRKERNSEC
87322+#define MODULE_GRSEC "GRSEC "
87323+#else
87324+#define MODULE_GRSEC ""
87325+#endif
87326+
87327 #define VERMAGIC_STRING \
87328 UTS_RELEASE " " \
87329 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
87330 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
87331- MODULE_ARCH_VERMAGIC
87332+ MODULE_ARCH_VERMAGIC \
87333+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
87334+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
87335
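Each hardening feature now contributes a token to VERMAGIC_STRING, and RANDSTRUCT additionally embeds a hash of its layout seed, so a module built without the same plugin configuration (and therefore with incompatible struct layouts or atomic semantics) is refused at load time. Conceptually the loader's check is just a string comparison; a sketch of the effective semantics, not the kernel's exact code:

    if (strcmp(mod_vermagic, VERMAGIC_STRING) != 0)
            return -ENOEXEC;    /* built against an incompatible kernel */

A resulting string might look like "3.16.1 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC RANDSTRUCT_PLUGIN_<hash>" (illustrative).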
87336diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
87337index 502073a..a7de024 100644
87338--- a/include/linux/vga_switcheroo.h
87339+++ b/include/linux/vga_switcheroo.h
87340@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
87341
87342 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
87343
87344-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
87345-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
87346+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
87347+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
87348 #else
87349
87350 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
87351@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
87352
87353 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
87354
87355-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87356-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87357+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87358+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87359
87360 #endif
87361 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
87362diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
87363index 4b8a891..e9a2863 100644
87364--- a/include/linux/vmalloc.h
87365+++ b/include/linux/vmalloc.h
87366@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
87367 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
87368 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
87369 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
87370+
87371+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
87372+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
87373+#endif
87374+
87375 /* bits [20..32] reserved for arch specific ioremap internals */
87376
87377 /*
87378@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
87379 unsigned long flags, pgprot_t prot);
87380 extern void vunmap(const void *addr);
87381
87382+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
87383+extern void unmap_process_stacks(struct task_struct *task);
87384+#endif
87385+
87386 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
87387 unsigned long uaddr, void *kaddr,
87388 unsigned long size);
87389@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
87390
87391 /* for /dev/kmem */
87392 extern long vread(char *buf, char *addr, unsigned long count);
87393-extern long vwrite(char *buf, char *addr, unsigned long count);
87394+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
87395
87396 /*
87397 * Internals. Dont't use..
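Three separate features meet in this header: VM_KERNEXEC tags vmalloc areas that must come from the kernel's executable mapping range, which PAX_KERNEXEC keeps apart from ordinary non-executable vmalloc space; unmap_process_stacks() supports GRKERNSEC_KSTACKOVERFLOW's vmapped kernel stacks; and __size_overflow(3) points the size_overflow plugin at vwrite()'s count parameter. A sketch of how an allocation site might pick its range (the arch constants are an assumption):

    struct vm_struct *area;

    #ifdef VM_KERNEXEC
    area = __get_vm_area(size, VM_ALLOC | VM_KERNEXEC,
                         MODULES_VADDR, MODULES_END);   /* exec range */
    #else
    area = __get_vm_area(size, VM_ALLOC,
                         VMALLOC_START, VMALLOC_END);
    #endif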
87398diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
87399index 82e7db7..f8ce3d0 100644
87400--- a/include/linux/vmstat.h
87401+++ b/include/linux/vmstat.h
87402@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
87403 /*
87404 * Zone based page accounting with per cpu differentials.
87405 */
87406-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87407+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87408
87409 static inline void zone_page_state_add(long x, struct zone *zone,
87410 enum zone_stat_item item)
87411 {
87412- atomic_long_add(x, &zone->vm_stat[item]);
87413- atomic_long_add(x, &vm_stat[item]);
87414+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
87415+ atomic_long_add_unchecked(x, &vm_stat[item]);
87416 }
87417
87418-static inline unsigned long global_page_state(enum zone_stat_item item)
87419+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
87420 {
87421- long x = atomic_long_read(&vm_stat[item]);
87422+ long x = atomic_long_read_unchecked(&vm_stat[item]);
87423 #ifdef CONFIG_SMP
87424 if (x < 0)
87425 x = 0;
87426@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
87427 return x;
87428 }
87429
87430-static inline unsigned long zone_page_state(struct zone *zone,
87431+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
87432 enum zone_stat_item item)
87433 {
87434- long x = atomic_long_read(&zone->vm_stat[item]);
87435+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
87436 #ifdef CONFIG_SMP
87437 if (x < 0)
87438 x = 0;
87439@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
87440 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
87441 enum zone_stat_item item)
87442 {
87443- long x = atomic_long_read(&zone->vm_stat[item]);
87444+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
87445
87446 #ifdef CONFIG_SMP
87447 int cpu;
87448@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
87449
87450 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
87451 {
87452- atomic_long_inc(&zone->vm_stat[item]);
87453- atomic_long_inc(&vm_stat[item]);
87454+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
87455+ atomic_long_inc_unchecked(&vm_stat[item]);
87456 }
87457
87458 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
87459 {
87460- atomic_long_dec(&zone->vm_stat[item]);
87461- atomic_long_dec(&vm_stat[item]);
87462+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
87463+ atomic_long_dec_unchecked(&vm_stat[item]);
87464 }
87465
87466 static inline void __inc_zone_page_state(struct page *page,
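The zone statistics are another wraparound-is-fine case: these are hot-path counters, not reference counts, so they move to atomic_long_unchecked_t to escape PAX_REFCOUNT instrumentation, and the readers gain __intentional_overflow(-1) so the size_overflow plugin tolerates arithmetic on the returned values. The unchecked long API mirrors the checked one:

    atomic_long_unchecked_t nr_pages = ATOMIC_LONG_INIT(0);

    atomic_long_add_unchecked(16, &nr_pages);
    long snap = atomic_long_read_unchecked(&nr_pages); /* may have wrapped */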
87467diff --git a/include/linux/xattr.h b/include/linux/xattr.h
87468index 91b0a68..0e9adf6 100644
87469--- a/include/linux/xattr.h
87470+++ b/include/linux/xattr.h
87471@@ -28,7 +28,7 @@ struct xattr_handler {
87472 size_t size, int handler_flags);
87473 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
87474 size_t size, int flags, int handler_flags);
87475-};
87476+} __do_const;
87477
87478 struct xattr {
87479 const char *name;
87480@@ -37,6 +37,9 @@ struct xattr {
87481 };
87482
87483 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
87484+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
87485+ssize_t pax_getxattr(struct dentry *, void *, size_t);
87486+#endif
87487 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
87488 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
87489 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
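__do_const marks the whole type as constify-on-sight: every xattr_handler instance is treated as const by the plugin and placed in read-only memory, closing off handler-table overwrites without spelling const at each definition site. The new pax_getxattr() hook backs PAX_XATTR_PAX_FLAGS (see the user.pax.flags name added to uapi/linux/xattr.h further down). The __do_const effect, illustrated with hypothetical handlers ex_get/ex_set:

    static int ex_get(struct dentry *d, const char *n, void *b,
                      size_t s, int hf);
    static int ex_set(struct dentry *d, const char *n, const void *b,
                      size_t s, int f, int hf);

    /* no 'const' keyword, yet read-only under the constify plugin */
    static struct xattr_handler ex_handler = {
            .prefix = "user.",
            .get    = ex_get,
            .set    = ex_set,
    };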
87490diff --git a/include/linux/zlib.h b/include/linux/zlib.h
87491index 9c5a6b4..09c9438 100644
87492--- a/include/linux/zlib.h
87493+++ b/include/linux/zlib.h
87494@@ -31,6 +31,7 @@
87495 #define _ZLIB_H
87496
87497 #include <linux/zconf.h>
87498+#include <linux/compiler.h>
87499
87500 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
87501 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
87502@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
87503
87504 /* basic functions */
87505
87506-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
87507+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
87508 /*
87509 Returns the number of bytes that needs to be allocated for a per-
87510 stream workspace with the specified parameters. A pointer to this
87511diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
87512index eec6e46..82d5641 100644
87513--- a/include/media/v4l2-dev.h
87514+++ b/include/media/v4l2-dev.h
87515@@ -77,7 +77,7 @@ struct v4l2_file_operations {
87516 int (*mmap) (struct file *, struct vm_area_struct *);
87517 int (*open) (struct file *);
87518 int (*release) (struct file *);
87519-};
87520+} __do_const;
87521
87522 /*
87523 * Newer version of video_device, handled by videodev2.c
87524diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
87525index ffb69da..040393e 100644
87526--- a/include/media/v4l2-device.h
87527+++ b/include/media/v4l2-device.h
87528@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
87529 this function returns 0. If the name ends with a digit (e.g. cx18),
87530 then the name will be set to cx18-0 since cx180 looks really odd. */
87531 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
87532- atomic_t *instance);
87533+ atomic_unchecked_t *instance);
87534
87535 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
87536 Since the parent disappears this ensures that v4l2_dev doesn't have an
87537diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
87538index d9fa68f..45c88d1 100644
87539--- a/include/net/9p/transport.h
87540+++ b/include/net/9p/transport.h
87541@@ -63,7 +63,7 @@ struct p9_trans_module {
87542 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
87543 int (*zc_request)(struct p9_client *, struct p9_req_t *,
87544 char *, char *, int , int, int, int);
87545-};
87546+} __do_const;
87547
87548 void v9fs_register_trans(struct p9_trans_module *m);
87549 void v9fs_unregister_trans(struct p9_trans_module *m);
87550diff --git a/include/net/af_unix.h b/include/net/af_unix.h
87551index a175ba4..196eb82 100644
87552--- a/include/net/af_unix.h
87553+++ b/include/net/af_unix.h
87554@@ -36,7 +36,7 @@ struct unix_skb_parms {
87555 u32 secid; /* Security ID */
87556 #endif
87557 u32 consumed;
87558-};
87559+} __randomize_layout;
87560
87561 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
87562 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
87563diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
87564index 4abdcb2..945c5cc 100644
87565--- a/include/net/bluetooth/l2cap.h
87566+++ b/include/net/bluetooth/l2cap.h
87567@@ -601,7 +601,7 @@ struct l2cap_ops {
87568 long (*get_sndtimeo) (struct l2cap_chan *chan);
87569 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
87570 unsigned long len, int nb);
87571-};
87572+} __do_const;
87573
87574 struct l2cap_conn {
87575 struct hci_conn *hcon;
87576diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
87577index f2ae33d..c457cf0 100644
87578--- a/include/net/caif/cfctrl.h
87579+++ b/include/net/caif/cfctrl.h
87580@@ -52,7 +52,7 @@ struct cfctrl_rsp {
87581 void (*radioset_rsp)(void);
87582 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
87583 struct cflayer *client_layer);
87584-};
87585+} __no_const;
87586
87587 /* Link Setup Parameters for CAIF-Links. */
87588 struct cfctrl_link_param {
87589@@ -101,8 +101,8 @@ struct cfctrl_request_info {
87590 struct cfctrl {
87591 struct cfsrvl serv;
87592 struct cfctrl_rsp res;
87593- atomic_t req_seq_no;
87594- atomic_t rsp_seq_no;
87595+ atomic_unchecked_t req_seq_no;
87596+ atomic_unchecked_t rsp_seq_no;
87597 struct list_head list;
87598 /* Protects from simultaneous access to first_req list */
87599 spinlock_t info_list_lock;
87600diff --git a/include/net/flow.h b/include/net/flow.h
87601index 8109a15..504466d 100644
87602--- a/include/net/flow.h
87603+++ b/include/net/flow.h
87604@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
87605
87606 void flow_cache_flush(struct net *net);
87607 void flow_cache_flush_deferred(struct net *net);
87608-extern atomic_t flow_cache_genid;
87609+extern atomic_unchecked_t flow_cache_genid;
87610
87611 #endif
87612diff --git a/include/net/genetlink.h b/include/net/genetlink.h
87613index 93695f0..766d71c 100644
87614--- a/include/net/genetlink.h
87615+++ b/include/net/genetlink.h
87616@@ -120,7 +120,7 @@ struct genl_ops {
87617 u8 cmd;
87618 u8 internal_flags;
87619 u8 flags;
87620-};
87621+} __do_const;
87622
87623 int __genl_register_family(struct genl_family *family);
87624
87625diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
87626index 734d9b5..48a9a4b 100644
87627--- a/include/net/gro_cells.h
87628+++ b/include/net/gro_cells.h
87629@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
87630 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
87631
87632 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
87633- atomic_long_inc(&dev->rx_dropped);
87634+ atomic_long_inc_unchecked(&dev->rx_dropped);
87635 kfree_skb(skb);
87636 return;
87637 }
87638diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
87639index 7a43138..bc76865 100644
87640--- a/include/net/inet_connection_sock.h
87641+++ b/include/net/inet_connection_sock.h
87642@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
87643 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
87644 int (*bind_conflict)(const struct sock *sk,
87645 const struct inet_bind_bucket *tb, bool relax);
87646-};
87647+} __do_const;
87648
87649 /** inet_connection_sock - INET connection oriented sock
87650 *
87651diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
87652index 01d590e..f69c61d 100644
87653--- a/include/net/inetpeer.h
87654+++ b/include/net/inetpeer.h
87655@@ -47,7 +47,7 @@ struct inet_peer {
87656 */
87657 union {
87658 struct {
87659- atomic_t rid; /* Frag reception counter */
87660+ atomic_unchecked_t rid; /* Frag reception counter */
87661 };
87662 struct rcu_head rcu;
87663 struct inet_peer *gc_next;
87664diff --git a/include/net/ip.h b/include/net/ip.h
87665index 7596eb2..f7f5fad 100644
87666--- a/include/net/ip.h
87667+++ b/include/net/ip.h
87668@@ -309,7 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
87669 }
87670 }
87671
87672-u32 ip_idents_reserve(u32 hash, int segs);
87673+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
87674 void __ip_select_ident(struct iphdr *iph, int segs);
87675
87676 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
87677diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
87678index 9922093..a1755d6 100644
87679--- a/include/net/ip_fib.h
87680+++ b/include/net/ip_fib.h
87681@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
87682
87683 #define FIB_RES_SADDR(net, res) \
87684 ((FIB_RES_NH(res).nh_saddr_genid == \
87685- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
87686+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
87687 FIB_RES_NH(res).nh_saddr : \
87688 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
87689 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
87690diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
87691index 624a8a5..b1e2a24 100644
87692--- a/include/net/ip_vs.h
87693+++ b/include/net/ip_vs.h
87694@@ -558,7 +558,7 @@ struct ip_vs_conn {
87695 struct ip_vs_conn *control; /* Master control connection */
87696 atomic_t n_control; /* Number of controlled ones */
87697 struct ip_vs_dest *dest; /* real server */
87698- atomic_t in_pkts; /* incoming packet counter */
87699+ atomic_unchecked_t in_pkts; /* incoming packet counter */
87700
87701 /* packet transmitter for different forwarding methods. If it
87702 mangles the packet, it must return NF_DROP or better NF_STOLEN,
87703@@ -705,7 +705,7 @@ struct ip_vs_dest {
87704 __be16 port; /* port number of the server */
87705 union nf_inet_addr addr; /* IP address of the server */
87706 volatile unsigned int flags; /* dest status flags */
87707- atomic_t conn_flags; /* flags to copy to conn */
87708+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
87709 atomic_t weight; /* server weight */
87710
87711 atomic_t refcnt; /* reference counter */
87712@@ -960,11 +960,11 @@ struct netns_ipvs {
87713 /* ip_vs_lblc */
87714 int sysctl_lblc_expiration;
87715 struct ctl_table_header *lblc_ctl_header;
87716- struct ctl_table *lblc_ctl_table;
87717+ ctl_table_no_const *lblc_ctl_table;
87718 /* ip_vs_lblcr */
87719 int sysctl_lblcr_expiration;
87720 struct ctl_table_header *lblcr_ctl_header;
87721- struct ctl_table *lblcr_ctl_table;
87722+ ctl_table_no_const *lblcr_ctl_table;
87723 /* ip_vs_est */
87724 struct list_head est_list; /* estimator list */
87725 spinlock_t est_lock;
87726diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
87727index 8d4f588..2e37ad2 100644
87728--- a/include/net/irda/ircomm_tty.h
87729+++ b/include/net/irda/ircomm_tty.h
87730@@ -33,6 +33,7 @@
87731 #include <linux/termios.h>
87732 #include <linux/timer.h>
87733 #include <linux/tty.h> /* struct tty_struct */
87734+#include <asm/local.h>
87735
87736 #include <net/irda/irias_object.h>
87737 #include <net/irda/ircomm_core.h>
87738diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
87739index 714cc9a..ea05f3e 100644
87740--- a/include/net/iucv/af_iucv.h
87741+++ b/include/net/iucv/af_iucv.h
87742@@ -149,7 +149,7 @@ struct iucv_skb_cb {
87743 struct iucv_sock_list {
87744 struct hlist_head head;
87745 rwlock_t lock;
87746- atomic_t autobind_name;
87747+ atomic_unchecked_t autobind_name;
87748 };
87749
87750 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
87751diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
87752index f3be818..bf46196 100644
87753--- a/include/net/llc_c_ac.h
87754+++ b/include/net/llc_c_ac.h
87755@@ -87,7 +87,7 @@
87756 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
87757 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
87758
87759-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
87760+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
87761
87762 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
87763 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
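Making the llc_conn_action_t typedef a const function pointer means every array of these actions — the 802.2 LLC state-machine tables — becomes immutable data the compiler places in .rodata; the same treatment is applied to the event and qualifier typedefs below, and __do_const finishes the job on the transition structs. The effect in miniature:

    typedef int (* const action_t)(struct sock *sk, struct sk_buff *skb);

    /* elements can only be set by static initialization,
     * never retargeted at runtime */
    static action_t actions[] = {
            llc_conn_ac_conn_ind,
            NULL,
    };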
87764diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
87765index 3948cf1..83b28c4 100644
87766--- a/include/net/llc_c_ev.h
87767+++ b/include/net/llc_c_ev.h
87768@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
87769 return (struct llc_conn_state_ev *)skb->cb;
87770 }
87771
87772-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
87773-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
87774+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
87775+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
87776
87777 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
87778 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
87779diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
87780index 0e79cfb..f46db31 100644
87781--- a/include/net/llc_c_st.h
87782+++ b/include/net/llc_c_st.h
87783@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
87784 u8 next_state;
87785 llc_conn_ev_qfyr_t *ev_qualifiers;
87786 llc_conn_action_t *ev_actions;
87787-};
87788+} __do_const;
87789
87790 struct llc_conn_state {
87791 u8 current_state;
87792diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
87793index a61b98c..aade1eb 100644
87794--- a/include/net/llc_s_ac.h
87795+++ b/include/net/llc_s_ac.h
87796@@ -23,7 +23,7 @@
87797 #define SAP_ACT_TEST_IND 9
87798
87799 /* All action functions must look like this */
87800-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
87801+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
87802
87803 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
87804 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
87805diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
87806index 567c681..cd73ac02 100644
87807--- a/include/net/llc_s_st.h
87808+++ b/include/net/llc_s_st.h
87809@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
87810 llc_sap_ev_t ev;
87811 u8 next_state;
87812 llc_sap_action_t *ev_actions;
87813-};
87814+} __do_const;
87815
87816 struct llc_sap_state {
87817 u8 curr_state;
87818diff --git a/include/net/mac80211.h b/include/net/mac80211.h
87819index 421b6ec..5a03729 100644
87820--- a/include/net/mac80211.h
87821+++ b/include/net/mac80211.h
87822@@ -4588,7 +4588,7 @@ struct rate_control_ops {
87823 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
87824
87825 u32 (*get_expected_throughput)(void *priv_sta);
87826-};
87827+} __do_const;
87828
87829 static inline int rate_supported(struct ieee80211_sta *sta,
87830 enum ieee80211_band band,
87831diff --git a/include/net/neighbour.h b/include/net/neighbour.h
87832index 47f4254..fd095bc 100644
87833--- a/include/net/neighbour.h
87834+++ b/include/net/neighbour.h
87835@@ -163,7 +163,7 @@ struct neigh_ops {
87836 void (*error_report)(struct neighbour *, struct sk_buff *);
87837 int (*output)(struct neighbour *, struct sk_buff *);
87838 int (*connected_output)(struct neighbour *, struct sk_buff *);
87839-};
87840+} __do_const;
87841
87842 struct pneigh_entry {
87843 struct pneigh_entry *next;
87844@@ -217,7 +217,7 @@ struct neigh_table {
87845 struct neigh_statistics __percpu *stats;
87846 struct neigh_hash_table __rcu *nht;
87847 struct pneigh_entry **phash_buckets;
87848-};
87849+} __randomize_layout;
87850
87851 static inline int neigh_parms_family(struct neigh_parms *p)
87852 {
87853diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
87854index 361d260..903d15f 100644
87855--- a/include/net/net_namespace.h
87856+++ b/include/net/net_namespace.h
87857@@ -129,8 +129,8 @@ struct net {
87858 struct netns_ipvs *ipvs;
87859 #endif
87860 struct sock *diag_nlsk;
87861- atomic_t fnhe_genid;
87862-};
87863+ atomic_unchecked_t fnhe_genid;
87864+} __randomize_layout;
87865
87866 #include <linux/seq_file_net.h>
87867
87868@@ -286,7 +286,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
87869 #define __net_init __init
87870 #define __net_exit __exit_refok
87871 #define __net_initdata __initdata
87872+#ifdef CONSTIFY_PLUGIN
87873 #define __net_initconst __initconst
87874+#else
87875+#define __net_initconst __initdata
87876+#endif
87877 #endif
87878
87879 struct pernet_operations {
87880@@ -296,7 +300,7 @@ struct pernet_operations {
87881 void (*exit_batch)(struct list_head *net_exit_list);
87882 int *id;
87883 size_t size;
87884-};
87885+} __do_const;
87886
87887 /*
87888 * Use these carefully. If you implement a network device and it
87889@@ -344,23 +348,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
87890
87891 static inline int rt_genid_ipv4(struct net *net)
87892 {
87893- return atomic_read(&net->ipv4.rt_genid);
87894+ return atomic_read_unchecked(&net->ipv4.rt_genid);
87895 }
87896
87897 static inline void rt_genid_bump_ipv4(struct net *net)
87898 {
87899- atomic_inc(&net->ipv4.rt_genid);
87900+ atomic_inc_unchecked(&net->ipv4.rt_genid);
87901 }
87902
87903 #if IS_ENABLED(CONFIG_IPV6)
87904 static inline int rt_genid_ipv6(struct net *net)
87905 {
87906- return atomic_read(&net->ipv6.rt_genid);
87907+ return atomic_read_unchecked(&net->ipv6.rt_genid);
87908 }
87909
87910 static inline void rt_genid_bump_ipv6(struct net *net)
87911 {
87912- atomic_inc(&net->ipv6.rt_genid);
87913+ atomic_inc_unchecked(&net->ipv6.rt_genid);
87914 }
87915 #else
87916 static inline int rt_genid_ipv6(struct net *net)
87917@@ -390,12 +394,12 @@ static inline void rt_genid_bump_all(struct net *net)
87918
87919 static inline int fnhe_genid(struct net *net)
87920 {
87921- return atomic_read(&net->fnhe_genid);
87922+ return atomic_read_unchecked(&net->fnhe_genid);
87923 }
87924
87925 static inline void fnhe_genid_bump(struct net *net)
87926 {
87927- atomic_inc(&net->fnhe_genid);
87928+ atomic_inc_unchecked(&net->fnhe_genid);
87929 }
87930
87931 #endif /* __NET_NET_NAMESPACE_H */
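The rt_genid and fnhe_genid generation counters exist to be bumped: every increment invalidates all cached routes or exceptions in the namespace at once, and wrapping around is part of the design, hence the unchecked accessors. The __net_initconst fallback exists because without the constify plugin the pernet ops tables stay writable, so they must land in __initdata rather than the read-only __initconst section. Typical generation-counter use, sketched around the accessors defined above:

    static inline bool cached_route_stale(const struct net *net, int genid)
    {
            /* one rt_genid_bump_ipv4(net) call invalidates every entry */
            return genid != rt_genid_ipv4(net);
    }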
87932diff --git a/include/net/netdma.h b/include/net/netdma.h
87933index 8ba8ce2..99b7fff 100644
87934--- a/include/net/netdma.h
87935+++ b/include/net/netdma.h
87936@@ -24,7 +24,7 @@
87937 #include <linux/dmaengine.h>
87938 #include <linux/skbuff.h>
87939
87940-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
87941+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
87942 struct sk_buff *skb, int offset, struct iovec *to,
87943 size_t len, struct dma_pinned_list *pinned_list);
87944
87945diff --git a/include/net/netlink.h b/include/net/netlink.h
87946index 2b47eaa..6d5bcc2 100644
87947--- a/include/net/netlink.h
87948+++ b/include/net/netlink.h
87949@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
87950 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
87951 {
87952 if (mark)
87953- skb_trim(skb, (unsigned char *) mark - skb->data);
87954+ skb_trim(skb, (const unsigned char *) mark - skb->data);
87955 }
87956
87957 /**
87958diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
87959index 773cce3..6a11852 100644
87960--- a/include/net/netns/conntrack.h
87961+++ b/include/net/netns/conntrack.h
87962@@ -13,10 +13,10 @@ struct nf_conntrack_ecache;
87963 struct nf_proto_net {
87964 #ifdef CONFIG_SYSCTL
87965 struct ctl_table_header *ctl_table_header;
87966- struct ctl_table *ctl_table;
87967+ ctl_table_no_const *ctl_table;
87968 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
87969 struct ctl_table_header *ctl_compat_header;
87970- struct ctl_table *ctl_compat_table;
87971+ ctl_table_no_const *ctl_compat_table;
87972 #endif
87973 #endif
87974 unsigned int users;
87975@@ -59,7 +59,7 @@ struct nf_ip_net {
87976 struct nf_icmp_net icmpv6;
87977 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
87978 struct ctl_table_header *ctl_table_header;
87979- struct ctl_table *ctl_table;
87980+ ctl_table_no_const *ctl_table;
87981 #endif
87982 };
87983
87984diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
87985index aec5e12..807233f 100644
87986--- a/include/net/netns/ipv4.h
87987+++ b/include/net/netns/ipv4.h
87988@@ -82,7 +82,7 @@ struct netns_ipv4 {
87989
87990 struct ping_group_range ping_group_range;
87991
87992- atomic_t dev_addr_genid;
87993+ atomic_unchecked_t dev_addr_genid;
87994
87995 #ifdef CONFIG_SYSCTL
87996 unsigned long *sysctl_local_reserved_ports;
87997@@ -96,6 +96,6 @@ struct netns_ipv4 {
87998 struct fib_rules_ops *mr_rules_ops;
87999 #endif
88000 #endif
88001- atomic_t rt_genid;
88002+ atomic_unchecked_t rt_genid;
88003 };
88004 #endif
88005diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
88006index 19d3446..3c87195 100644
88007--- a/include/net/netns/ipv6.h
88008+++ b/include/net/netns/ipv6.h
88009@@ -74,8 +74,8 @@ struct netns_ipv6 {
88010 struct fib_rules_ops *mr6_rules_ops;
88011 #endif
88012 #endif
88013- atomic_t dev_addr_genid;
88014- atomic_t rt_genid;
88015+ atomic_unchecked_t dev_addr_genid;
88016+ atomic_unchecked_t rt_genid;
88017 };
88018
88019 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
88020diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
88021index 3492434..209f58c 100644
88022--- a/include/net/netns/xfrm.h
88023+++ b/include/net/netns/xfrm.h
88024@@ -64,7 +64,7 @@ struct netns_xfrm {
88025
88026 /* flow cache part */
88027 struct flow_cache flow_cache_global;
88028- atomic_t flow_cache_genid;
88029+ atomic_unchecked_t flow_cache_genid;
88030 struct list_head flow_cache_gc_list;
88031 spinlock_t flow_cache_gc_lock;
88032 struct work_struct flow_cache_gc_work;
88033diff --git a/include/net/ping.h b/include/net/ping.h
88034index 026479b..d9b2829 100644
88035--- a/include/net/ping.h
88036+++ b/include/net/ping.h
88037@@ -54,7 +54,7 @@ struct ping_iter_state {
88038
88039 extern struct proto ping_prot;
88040 #if IS_ENABLED(CONFIG_IPV6)
88041-extern struct pingv6_ops pingv6_ops;
88042+extern struct pingv6_ops *pingv6_ops;
88043 #endif
88044
88045 struct pingfakehdr {
88046diff --git a/include/net/protocol.h b/include/net/protocol.h
88047index d6fcc1f..ca277058 100644
88048--- a/include/net/protocol.h
88049+++ b/include/net/protocol.h
88050@@ -49,7 +49,7 @@ struct net_protocol {
88051 * socket lookup?
88052 */
88053 icmp_strict_tag_validation:1;
88054-};
88055+} __do_const;
88056
88057 #if IS_ENABLED(CONFIG_IPV6)
88058 struct inet6_protocol {
88059@@ -62,7 +62,7 @@ struct inet6_protocol {
88060 u8 type, u8 code, int offset,
88061 __be32 info);
88062 unsigned int flags; /* INET6_PROTO_xxx */
88063-};
88064+} __do_const;
88065
88066 #define INET6_PROTO_NOPOLICY 0x1
88067 #define INET6_PROTO_FINAL 0x2
88068diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
88069index 72240e5..8c14bef 100644
88070--- a/include/net/rtnetlink.h
88071+++ b/include/net/rtnetlink.h
88072@@ -93,7 +93,7 @@ struct rtnl_link_ops {
88073 int (*fill_slave_info)(struct sk_buff *skb,
88074 const struct net_device *dev,
88075 const struct net_device *slave_dev);
88076-};
88077+} __do_const;
88078
88079 int __rtnl_link_register(struct rtnl_link_ops *ops);
88080 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
88081diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
88082index 4a5b9a3..ca27d73 100644
88083--- a/include/net/sctp/checksum.h
88084+++ b/include/net/sctp/checksum.h
88085@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
88086 unsigned int offset)
88087 {
88088 struct sctphdr *sh = sctp_hdr(skb);
88089- __le32 ret, old = sh->checksum;
88090- const struct skb_checksum_ops ops = {
88091+ __le32 ret, old = sh->checksum;
88092+ static const struct skb_checksum_ops ops = {
88093 .update = sctp_csum_update,
88094 .combine = sctp_csum_combine,
88095 };
88096diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
88097index 7f4eeb3..37e8fe1 100644
88098--- a/include/net/sctp/sm.h
88099+++ b/include/net/sctp/sm.h
88100@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
88101 typedef struct {
88102 sctp_state_fn_t *fn;
88103 const char *name;
88104-} sctp_sm_table_entry_t;
88105+} __do_const sctp_sm_table_entry_t;
88106
88107 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
88108 * currently in use.
88109@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
88110 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
88111
88112 /* Extern declarations for major data structures. */
88113-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88114+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88115
88116
88117 /* Get the size of a DATA chunk payload. */
88118diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
88119index f38588bf..94c1795 100644
88120--- a/include/net/sctp/structs.h
88121+++ b/include/net/sctp/structs.h
88122@@ -507,7 +507,7 @@ struct sctp_pf {
88123 struct sctp_association *asoc);
88124 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
88125 struct sctp_af *af;
88126-};
88127+} __do_const;
88128
88129
88130 /* Structure to track chunk fragments that have been acked, but peer
88131diff --git a/include/net/sock.h b/include/net/sock.h
88132index 1563507..20d5d0e 100644
88133--- a/include/net/sock.h
88134+++ b/include/net/sock.h
88135@@ -349,7 +349,7 @@ struct sock {
88136 unsigned int sk_napi_id;
88137 unsigned int sk_ll_usec;
88138 #endif
88139- atomic_t sk_drops;
88140+ atomic_unchecked_t sk_drops;
88141 int sk_rcvbuf;
88142
88143 struct sk_filter __rcu *sk_filter;
88144@@ -1038,7 +1038,7 @@ struct proto {
88145 void (*destroy_cgroup)(struct mem_cgroup *memcg);
88146 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
88147 #endif
88148-};
88149+} __randomize_layout;
88150
88151 /*
88152 * Bits in struct cg_proto.flags
88153@@ -1225,7 +1225,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
88154 return ret >> PAGE_SHIFT;
88155 }
88156
88157-static inline long
88158+static inline long __intentional_overflow(-1)
88159 sk_memory_allocated(const struct sock *sk)
88160 {
88161 struct proto *prot = sk->sk_prot;
88162@@ -1370,7 +1370,7 @@ struct sock_iocb {
88163 struct scm_cookie *scm;
88164 struct msghdr *msg, async_msg;
88165 struct kiocb *kiocb;
88166-};
88167+} __randomize_layout;
88168
88169 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
88170 {
88171@@ -1623,6 +1623,33 @@ void sk_common_release(struct sock *sk);
88172 /* Initialise core socket variables */
88173 void sock_init_data(struct socket *sock, struct sock *sk);
88174
88175+void sk_filter_release_rcu(struct rcu_head *rcu);
88176+
88177+/**
88178+ * sk_filter_release - release a socket filter
88179+ * @fp: filter to remove
88180+ *
88181+ * Remove a filter from a socket and release its resources.
88182+ */
88183+
88184+static inline void sk_filter_release(struct sk_filter *fp)
88185+{
88186+ if (atomic_dec_and_test(&fp->refcnt))
88187+ call_rcu(&fp->rcu, sk_filter_release_rcu);
88188+}
88189+
88190+static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
88191+{
88192+ atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
88193+ sk_filter_release(fp);
88194+}
88195+
88196+static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
88197+{
88198+ atomic_inc(&fp->refcnt);
88199+ atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
88200+}
88201+
88202 /*
88203 * Socket reference counting postulates.
88204 *
88205@@ -1805,7 +1832,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
88206 }
88207
88208 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
88209- char __user *from, char *to,
88210+ char __user *from, unsigned char *to,
88211 int copy, int offset)
88212 {
88213 if (skb->ip_summed == CHECKSUM_NONE) {
88214@@ -2067,7 +2094,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
88215 }
88216 }
88217
88218-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88219+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88220
88221 /**
88222 * sk_page_frag - return an appropriate page_frag
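Besides converting sk_drops (a statistic) to atomic_unchecked_t and randomizing the proto and sock_iocb layouts, this hunk relocates the sk_filter refcounting helpers into sock.h as inlines, so charging and uncharging can account filter memory against sk_omem_alloc at the call site. The attach pattern they support, sketched with locking and error handling elided:

    struct sk_filter *old_fp;

    sk_filter_charge(sk, new_fp);              /* ref + memory charge */
    old_fp = rcu_dereference_protected(sk->sk_filter,
                                       sock_owned_by_user(sk));
    rcu_assign_pointer(sk->sk_filter, new_fp);
    if (old_fp)
            sk_filter_uncharge(sk, old_fp);    /* uncharge + RCU free */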
88223diff --git a/include/net/tcp.h b/include/net/tcp.h
88224index 7286db8..f1aa7dc 100644
88225--- a/include/net/tcp.h
88226+++ b/include/net/tcp.h
88227@@ -535,7 +535,7 @@ void tcp_retransmit_timer(struct sock *sk);
88228 void tcp_xmit_retransmit_queue(struct sock *);
88229 void tcp_simple_retransmit(struct sock *);
88230 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
88231-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88232+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88233
88234 void tcp_send_probe0(struct sock *);
88235 void tcp_send_partial(struct sock *);
88236@@ -708,8 +708,8 @@ struct tcp_skb_cb {
88237 struct inet6_skb_parm h6;
88238 #endif
88239 } header; /* For incoming frames */
88240- __u32 seq; /* Starting sequence number */
88241- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
88242+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
88243+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
88244 __u32 when; /* used to compute rtt's */
88245 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
88246
88247@@ -723,7 +723,7 @@ struct tcp_skb_cb {
88248
88249 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
88250 /* 1 byte hole */
88251- __u32 ack_seq; /* Sequence number ACK'd */
88252+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
88253 };
88254
88255 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
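TCP sequence numbers are modulo-2^32 quantities by protocol design (RFC 793), so seq/end_seq/ack_seq are annotated with __intentional_overflow to exempt arithmetic on them from size_overflow reports, and tcp_fragment()'s length parameter gets the same treatment. The canonical wraparound-safe comparison these fields rely on, written here as a standalone helper:

    static inline bool seq_before(__u32 a, __u32 b)
    {
            return (__s32)(a - b) < 0;   /* correct across the 2^32 wrap */
    }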
88256diff --git a/include/net/xfrm.h b/include/net/xfrm.h
88257index 721e9c3b..3c81bbf 100644
88258--- a/include/net/xfrm.h
88259+++ b/include/net/xfrm.h
88260@@ -285,7 +285,6 @@ struct xfrm_dst;
88261 struct xfrm_policy_afinfo {
88262 unsigned short family;
88263 struct dst_ops *dst_ops;
88264- void (*garbage_collect)(struct net *net);
88265 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
88266 const xfrm_address_t *saddr,
88267 const xfrm_address_t *daddr);
88268@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
88269 struct net_device *dev,
88270 const struct flowi *fl);
88271 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
88272-};
88273+} __do_const;
88274
88275 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
88276 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
88277@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
88278 int (*transport_finish)(struct sk_buff *skb,
88279 int async);
88280 void (*local_error)(struct sk_buff *skb, u32 mtu);
88281-};
88282+} __do_const;
88283
88284 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
88285 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
88286@@ -437,7 +436,7 @@ struct xfrm_mode {
88287 struct module *owner;
88288 unsigned int encap;
88289 int flags;
88290-};
88291+} __do_const;
88292
88293 /* Flags for xfrm_mode. */
88294 enum {
88295@@ -534,7 +533,7 @@ struct xfrm_policy {
88296 struct timer_list timer;
88297
88298 struct flow_cache_object flo;
88299- atomic_t genid;
88300+ atomic_unchecked_t genid;
88301 u32 priority;
88302 u32 index;
88303 struct xfrm_mark mark;
88304@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
88305 }
88306
88307 void xfrm_garbage_collect(struct net *net);
88308+void xfrm_garbage_collect_deferred(struct net *net);
88309
88310 #else
88311
88312@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
88313 static inline void xfrm_garbage_collect(struct net *net)
88314 {
88315 }
88316+static inline void xfrm_garbage_collect_deferred(struct net *net)
88317+{
88318+}
88319 #endif
88320
88321 static __inline__
88322diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
88323index 1017e0b..227aa4d 100644
88324--- a/include/rdma/iw_cm.h
88325+++ b/include/rdma/iw_cm.h
88326@@ -122,7 +122,7 @@ struct iw_cm_verbs {
88327 int backlog);
88328
88329 int (*destroy_listen)(struct iw_cm_id *cm_id);
88330-};
88331+} __no_const;
88332
88333 /**
88334 * iw_create_cm_id - Create an IW CM identifier.
88335diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
88336index 52beadf..598734c 100644
88337--- a/include/scsi/libfc.h
88338+++ b/include/scsi/libfc.h
88339@@ -771,6 +771,7 @@ struct libfc_function_template {
88340 */
88341 void (*disc_stop_final) (struct fc_lport *);
88342 };
88343+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
88344
88345 /**
88346 * struct fc_disc - Discovery context
88347@@ -875,7 +876,7 @@ struct fc_lport {
88348 struct fc_vport *vport;
88349
88350 /* Operational Information */
88351- struct libfc_function_template tt;
88352+ libfc_function_template_no_const tt;
88353 u8 link_up;
88354 u8 qfull;
88355 enum fc_lport_state state;
88356diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
88357index 27ab310..60dc245 100644
88358--- a/include/scsi/scsi_device.h
88359+++ b/include/scsi/scsi_device.h
88360@@ -187,9 +187,9 @@ struct scsi_device {
88361 unsigned int max_device_blocked; /* what device_blocked counts down from */
88362 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
88363
88364- atomic_t iorequest_cnt;
88365- atomic_t iodone_cnt;
88366- atomic_t ioerr_cnt;
88367+ atomic_unchecked_t iorequest_cnt;
88368+ atomic_unchecked_t iodone_cnt;
88369+ atomic_unchecked_t ioerr_cnt;
88370
88371 struct device sdev_gendev,
88372 sdev_dev;
88373diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
88374index 8c79980..723f6f9 100644
88375--- a/include/scsi/scsi_transport_fc.h
88376+++ b/include/scsi/scsi_transport_fc.h
88377@@ -752,7 +752,8 @@ struct fc_function_template {
88378 unsigned long show_host_system_hostname:1;
88379
88380 unsigned long disable_target_scan:1;
88381-};
88382+} __do_const;
88383+typedef struct fc_function_template __no_const fc_function_template_no_const;
88384
88385
88386 /**
88387diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
88388index ae6c3b8..fd748ac 100644
88389--- a/include/sound/compress_driver.h
88390+++ b/include/sound/compress_driver.h
88391@@ -128,7 +128,7 @@ struct snd_compr_ops {
88392 struct snd_compr_caps *caps);
88393 int (*get_codec_caps) (struct snd_compr_stream *stream,
88394 struct snd_compr_codec_caps *codec);
88395-};
88396+} __no_const;
88397
88398 /**
88399 * struct snd_compr: Compressed device
88400diff --git a/include/sound/soc.h b/include/sound/soc.h
88401index ed9e2d7..aad0887 100644
88402--- a/include/sound/soc.h
88403+++ b/include/sound/soc.h
88404@@ -798,7 +798,7 @@ struct snd_soc_codec_driver {
88405 /* probe ordering - for components with runtime dependencies */
88406 int probe_order;
88407 int remove_order;
88408-};
88409+} __do_const;
88410
88411 /* SoC platform interface */
88412 struct snd_soc_platform_driver {
88413@@ -845,7 +845,7 @@ struct snd_soc_platform_driver {
88414 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
88415 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
88416 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
88417-};
88418+} __do_const;
88419
88420 struct snd_soc_platform {
88421 const char *name;
88422diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
88423index 9ec9864..e2ee1ee 100644
88424--- a/include/target/target_core_base.h
88425+++ b/include/target/target_core_base.h
88426@@ -761,7 +761,7 @@ struct se_device {
88427 atomic_long_t write_bytes;
88428 /* Active commands on this virtual SE device */
88429 atomic_t simple_cmds;
88430- atomic_t dev_ordered_id;
88431+ atomic_unchecked_t dev_ordered_id;
88432 atomic_t dev_ordered_sync;
88433 atomic_t dev_qf_count;
88434 int export_count;
88435diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
88436new file mode 100644
88437index 0000000..fb634b7
88438--- /dev/null
88439+++ b/include/trace/events/fs.h
88440@@ -0,0 +1,53 @@
88441+#undef TRACE_SYSTEM
88442+#define TRACE_SYSTEM fs
88443+
88444+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
88445+#define _TRACE_FS_H
88446+
88447+#include <linux/fs.h>
88448+#include <linux/tracepoint.h>
88449+
88450+TRACE_EVENT(do_sys_open,
88451+
88452+ TP_PROTO(const char *filename, int flags, int mode),
88453+
88454+ TP_ARGS(filename, flags, mode),
88455+
88456+ TP_STRUCT__entry(
88457+ __string( filename, filename )
88458+ __field( int, flags )
88459+ __field( int, mode )
88460+ ),
88461+
88462+ TP_fast_assign(
88463+ __assign_str(filename, filename);
88464+ __entry->flags = flags;
88465+ __entry->mode = mode;
88466+ ),
88467+
88468+ TP_printk("\"%s\" %x %o",
88469+ __get_str(filename), __entry->flags, __entry->mode)
88470+);
88471+
88472+TRACE_EVENT(open_exec,
88473+
88474+ TP_PROTO(const char *filename),
88475+
88476+ TP_ARGS(filename),
88477+
88478+ TP_STRUCT__entry(
88479+ __string( filename, filename )
88480+ ),
88481+
88482+ TP_fast_assign(
88483+ __assign_str(filename, filename);
88484+ ),
88485+
88486+ TP_printk("\"%s\"",
88487+ __get_str(filename))
88488+);
88489+
88490+#endif /* _TRACE_FS_H */
88491+
88492+/* This part must be outside protection */
88493+#include <trace/define_trace.h>
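This new header defines two tracepoints in an "fs" subsystem using the standard TRACE_EVENT() machinery: do_sys_open records filename/flags/mode and open_exec just the filename. The header only declares the events; the emitting call sites are assumed to live in fs/ elsewhere in this patch. Once wired up, they behave like any other trace event:

    /* emitting side, e.g. in do_sys_open() (assumption about placement): */
    trace_do_sys_open(tmp->name, flags, mode);

    /* consuming side, via tracefs:
     *   echo 1 > /sys/kernel/debug/tracing/events/fs/do_sys_open/enable
     *   cat /sys/kernel/debug/tracing/trace_pipe
     */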
88494diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
88495index 1c09820..7f5ec79 100644
88496--- a/include/trace/events/irq.h
88497+++ b/include/trace/events/irq.h
88498@@ -36,7 +36,7 @@ struct softirq_action;
88499 */
88500 TRACE_EVENT(irq_handler_entry,
88501
88502- TP_PROTO(int irq, struct irqaction *action),
88503+ TP_PROTO(int irq, const struct irqaction *action),
88504
88505 TP_ARGS(irq, action),
88506
88507@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
88508 */
88509 TRACE_EVENT(irq_handler_exit,
88510
88511- TP_PROTO(int irq, struct irqaction *action, int ret),
88512+ TP_PROTO(int irq, const struct irqaction *action, int ret),
88513
88514 TP_ARGS(irq, action, ret),
88515
88516diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
88517index 7caf44c..23c6f27 100644
88518--- a/include/uapi/linux/a.out.h
88519+++ b/include/uapi/linux/a.out.h
88520@@ -39,6 +39,14 @@ enum machine_type {
88521 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
88522 };
88523
88524+/* Constants for the N_FLAGS field */
88525+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88526+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
88527+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
88528+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
88529+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88530+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88531+
88532 #if !defined (N_MAGIC)
88533 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
88534 #endif
88535diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
88536index 22b6ad3..aeba37e 100644
88537--- a/include/uapi/linux/bcache.h
88538+++ b/include/uapi/linux/bcache.h
88539@@ -5,6 +5,7 @@
88540 * Bcache on disk data structures
88541 */
88542
88543+#include <linux/compiler.h>
88544 #include <asm/types.h>
88545
88546 #define BITMASK(name, type, field, offset, size) \
88547@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
88548 /* Btree keys - all units are in sectors */
88549
88550 struct bkey {
88551- __u64 high;
88552- __u64 low;
88553+ __u64 high __intentional_overflow(-1);
88554+ __u64 low __intentional_overflow(-1);
88555 __u64 ptr[];
88556 };
88557
88558diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
88559index d876736..ccce5c0 100644
88560--- a/include/uapi/linux/byteorder/little_endian.h
88561+++ b/include/uapi/linux/byteorder/little_endian.h
88562@@ -42,51 +42,51 @@
88563
88564 static inline __le64 __cpu_to_le64p(const __u64 *p)
88565 {
88566- return (__force __le64)*p;
88567+ return (__force const __le64)*p;
88568 }
88569-static inline __u64 __le64_to_cpup(const __le64 *p)
88570+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
88571 {
88572- return (__force __u64)*p;
88573+ return (__force const __u64)*p;
88574 }
88575 static inline __le32 __cpu_to_le32p(const __u32 *p)
88576 {
88577- return (__force __le32)*p;
88578+ return (__force const __le32)*p;
88579 }
88580 static inline __u32 __le32_to_cpup(const __le32 *p)
88581 {
88582- return (__force __u32)*p;
88583+ return (__force const __u32)*p;
88584 }
88585 static inline __le16 __cpu_to_le16p(const __u16 *p)
88586 {
88587- return (__force __le16)*p;
88588+ return (__force const __le16)*p;
88589 }
88590 static inline __u16 __le16_to_cpup(const __le16 *p)
88591 {
88592- return (__force __u16)*p;
88593+ return (__force const __u16)*p;
88594 }
88595 static inline __be64 __cpu_to_be64p(const __u64 *p)
88596 {
88597- return (__force __be64)__swab64p(p);
88598+ return (__force const __be64)__swab64p(p);
88599 }
88600 static inline __u64 __be64_to_cpup(const __be64 *p)
88601 {
88602- return __swab64p((__u64 *)p);
88603+ return __swab64p((const __u64 *)p);
88604 }
88605 static inline __be32 __cpu_to_be32p(const __u32 *p)
88606 {
88607- return (__force __be32)__swab32p(p);
88608+ return (__force const __be32)__swab32p(p);
88609 }
88610-static inline __u32 __be32_to_cpup(const __be32 *p)
88611+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
88612 {
88613- return __swab32p((__u32 *)p);
88614+ return __swab32p((const __u32 *)p);
88615 }
88616 static inline __be16 __cpu_to_be16p(const __u16 *p)
88617 {
88618- return (__force __be16)__swab16p(p);
88619+ return (__force const __be16)__swab16p(p);
88620 }
88621 static inline __u16 __be16_to_cpup(const __be16 *p)
88622 {
88623- return __swab16p((__u16 *)p);
88624+ return __swab16p((const __u16 *)p);
88625 }
88626 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
88627 #define __le64_to_cpus(x) do { (void)(x); } while (0)
88628diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
88629index ef6103b..d4e65dd 100644
88630--- a/include/uapi/linux/elf.h
88631+++ b/include/uapi/linux/elf.h
88632@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
88633 #define PT_GNU_EH_FRAME 0x6474e550
88634
88635 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
88636+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
88637+
88638+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
88639+
88640+/* Constants for the e_flags field */
88641+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88642+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
88643+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
88644+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
88645+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88646+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88647
88648 /*
88649 * Extended Numbering
88650@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
88651 #define DT_DEBUG 21
88652 #define DT_TEXTREL 22
88653 #define DT_JMPREL 23
88654+#define DT_FLAGS 30
88655+ #define DF_TEXTREL 0x00000004
88656 #define DT_ENCODING 32
88657 #define OLD_DT_LOOS 0x60000000
88658 #define DT_LOOS 0x6000000d
88659@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
88660 #define PF_W 0x2
88661 #define PF_X 0x1
88662
88663+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
88664+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
88665+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
88666+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
88667+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
88668+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
88669+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
88670+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
88671+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
88672+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
88673+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
88674+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
88675+
88676 typedef struct elf32_phdr{
88677 Elf32_Word p_type;
88678 Elf32_Off p_offset;
88679@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
88680 #define EI_OSABI 7
88681 #define EI_PAD 8
88682
88683+#define EI_PAX 14
88684+
88685 #define ELFMAG0 0x7f /* EI_MAG */
88686 #define ELFMAG1 'E'
88687 #define ELFMAG2 'L'
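PT_PAX_FLAGS defines a dedicated program header through which per-binary PaX enable/disable flags (PF_PAGEEXEC through PF_NORANDMMAP) travel with each executable, alongside the legacy EF_PAX_* e_flags and the EI_PAX identification byte; the patched ELF loader reads whichever marking scheme is configured. A userspace sketch that inspects the phdr-based flags in a 64-bit image already mapped at ehdr (the constants come from the patched elf.h; a stock <elf.h> will not define them):

    #include <elf.h>
    #include <stdio.h>

    static void show_mprotect(const Elf64_Ehdr *ehdr)
    {
            const Elf64_Phdr *ph =
                    (const Elf64_Phdr *)((const char *)ehdr + ehdr->e_phoff);
            for (int i = 0; i < ehdr->e_phnum; i++)
                    if (ph[i].p_type == PT_PAX_FLAGS)
                            printf("MPROTECT %s\n",
                                   (ph[i].p_flags & PF_NOMPROTECT) ?
                                   "disabled" : "enforced");
    }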
88688diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
88689index aa169c4..6a2771d 100644
88690--- a/include/uapi/linux/personality.h
88691+++ b/include/uapi/linux/personality.h
88692@@ -30,6 +30,7 @@ enum {
88693 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
88694 ADDR_NO_RANDOMIZE | \
88695 ADDR_COMPAT_LAYOUT | \
88696+ ADDR_LIMIT_3GB | \
88697 MMAP_PAGE_ZERO)
88698
88699 /*
88700diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
88701index 7530e74..e714828 100644
88702--- a/include/uapi/linux/screen_info.h
88703+++ b/include/uapi/linux/screen_info.h
88704@@ -43,7 +43,8 @@ struct screen_info {
88705 __u16 pages; /* 0x32 */
88706 __u16 vesa_attributes; /* 0x34 */
88707 __u32 capabilities; /* 0x36 */
88708- __u8 _reserved[6]; /* 0x3a */
88709+ __u16 vesapm_size; /* 0x3a */
88710+ __u8 _reserved[4]; /* 0x3c */
88711 } __attribute__((packed));
88712
88713 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88714diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
88715index 0e011eb..82681b1 100644
88716--- a/include/uapi/linux/swab.h
88717+++ b/include/uapi/linux/swab.h
88718@@ -43,7 +43,7 @@
88719 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
88720 */
88721
88722-static inline __attribute_const__ __u16 __fswab16(__u16 val)
88723+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
88724 {
88725 #ifdef __HAVE_BUILTIN_BSWAP16__
88726 return __builtin_bswap16(val);
88727@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
88728 #endif
88729 }
88730
88731-static inline __attribute_const__ __u32 __fswab32(__u32 val)
88732+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
88733 {
88734 #ifdef __HAVE_BUILTIN_BSWAP32__
88735 return __builtin_bswap32(val);
88736@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
88737 #endif
88738 }
88739
88740-static inline __attribute_const__ __u64 __fswab64(__u64 val)
88741+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
88742 {
88743 #ifdef __HAVE_BUILTIN_BSWAP64__
88744 return __builtin_bswap64(val);
88745diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
88746index 6d67213..552fdd9 100644
88747--- a/include/uapi/linux/sysctl.h
88748+++ b/include/uapi/linux/sysctl.h
88749@@ -155,8 +155,6 @@ enum
88750 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
88751 };
88752
88753-
88754-
88755 /* CTL_VM names: */
88756 enum
88757 {
88758diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
88759index 168ff50..a921df2 100644
88760--- a/include/uapi/linux/videodev2.h
88761+++ b/include/uapi/linux/videodev2.h
88762@@ -1253,7 +1253,7 @@ struct v4l2_ext_control {
88763 union {
88764 __s32 value;
88765 __s64 value64;
88766- char *string;
88767+ char __user *string;
88768 };
88769 } __attribute__ ((packed));
88770
88771diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
88772index c38355c..17a57bc 100644
88773--- a/include/uapi/linux/xattr.h
88774+++ b/include/uapi/linux/xattr.h
88775@@ -73,5 +73,9 @@
88776 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
88777 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
88778
88779+/* User namespace */
88780+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
88781+#define XATTR_PAX_FLAGS_SUFFIX "flags"
88782+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
88783
88784 #endif /* _UAPI_LINUX_XATTR_H */
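
The new user.pax.flags name lets PaX markings ride in an extended attribute instead of (or alongside) the ELF header marking, which survives tools that rewrite binaries. A minimal sketch that applies such a marking from userspace, equivalent in spirit to `setfattr -n user.pax.flags -v em <file>`; the flag-letter convention ("e" = EMUTRAMP, "m" = MPROTECT off, ...) follows the PaX userland tools and is an assumption here, not something defined by this header:

/* Set the PaX flags xattr on a file given on the command line. */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

#define XATTR_NAME_PAX_FLAGS "user.pax.flags"   /* from the hunk above */

int main(int argc, char **argv)
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <file> <flags>\n", argv[0]);
        return 1;
    }
    if (setxattr(argv[1], XATTR_NAME_PAX_FLAGS,
                 argv[2], strlen(argv[2]), 0) != 0) {
        perror("setxattr");
        return 1;
    }
    return 0;
}
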
88785diff --git a/include/video/udlfb.h b/include/video/udlfb.h
88786index f9466fa..f4e2b81 100644
88787--- a/include/video/udlfb.h
88788+++ b/include/video/udlfb.h
88789@@ -53,10 +53,10 @@ struct dlfb_data {
88790 u32 pseudo_palette[256];
88791 int blank_mode; /*one of FB_BLANK_ */
88792 /* blit-only rendering path metrics, exposed through sysfs */
88793- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88794- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
88795- atomic_t bytes_sent; /* to usb, after compression including overhead */
88796- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
88797+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88798+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
88799+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
88800+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
88801 };
88802
88803 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
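
atomic_unchecked_t is the PAX_REFCOUNT escape hatch: plain atomic_t traps on overflow to stop refcount-overflow exploits, while counters that are mere statistics, like these blit metrics, may wrap harmlessly and are switched to the unchecked type. A userspace sketch of the same split using C11 atomics; the saturating ref_get is an illustrative stand-in for the overflow trap, not the kernel implementation:

/* Statistics may wrap; reference counts must not. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint bytes_rendered;       /* stats: wrap is harmless */
static atomic_int  refcount = 1;         /* object lifetime: wrap is fatal */

static int ref_get(void)
{
    int old = atomic_load(&refcount);
    do {
        if (old == INT_MAX)
            return 0;                    /* saturate instead of wrapping */
    } while (!atomic_compare_exchange_weak(&refcount, &old, old + 1));
    return 1;
}

int main(void)
{
    atomic_fetch_add(&bytes_rendered, 4096);   /* no overflow concern */
    printf("ref_get: %d, refs now %d\n", ref_get(), atomic_load(&refcount));
    return 0;
}
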
88804diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
88805index 30f5362..8ed8ac9 100644
88806--- a/include/video/uvesafb.h
88807+++ b/include/video/uvesafb.h
88808@@ -122,6 +122,7 @@ struct uvesafb_par {
88809 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
88810 u8 pmi_setpal; /* PMI for palette changes */
88811 u16 *pmi_base; /* protected mode interface location */
88812+ u8 *pmi_code; /* protected mode code location */
88813 void *pmi_start;
88814 void *pmi_pal;
88815 u8 *vbe_state_orig; /*
88816diff --git a/init/Kconfig b/init/Kconfig
88817index 9d76b99..d378b1e 100644
88818--- a/init/Kconfig
88819+++ b/init/Kconfig
88820@@ -1105,6 +1105,7 @@ endif # CGROUPS
88821
88822 config CHECKPOINT_RESTORE
88823 bool "Checkpoint/restore support" if EXPERT
88824+ depends on !GRKERNSEC
88825 default n
88826 help
88827 Enables additional kernel features for the sake of checkpoint/restore.
88828@@ -1589,7 +1590,7 @@ config SLUB_DEBUG
88829
88830 config COMPAT_BRK
88831 bool "Disable heap randomization"
88832- default y
88833+ default n
88834 help
88835 Randomizing heap placement makes heap exploits harder, but it
88836 also breaks ancient binaries (including anything libc5 based).
88837@@ -1877,7 +1878,7 @@ config INIT_ALL_POSSIBLE
88838 config STOP_MACHINE
88839 bool
88840 default y
88841- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
88842+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
88843 help
88844 Need stop_machine() primitive.
88845
88846diff --git a/init/Makefile b/init/Makefile
88847index 7bc47ee..6da2dc7 100644
88848--- a/init/Makefile
88849+++ b/init/Makefile
88850@@ -2,6 +2,9 @@
88851 # Makefile for the linux kernel.
88852 #
88853
88854+ccflags-y := $(GCC_PLUGINS_CFLAGS)
88855+asflags-y := $(GCC_PLUGINS_AFLAGS)
88856+
88857 obj-y := main.o version.o mounts.o
88858 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
88859 obj-y += noinitramfs.o
88860diff --git a/init/do_mounts.c b/init/do_mounts.c
88861index 82f2288..ea1430a 100644
88862--- a/init/do_mounts.c
88863+++ b/init/do_mounts.c
88864@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
88865 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
88866 {
88867 struct super_block *s;
88868- int err = sys_mount(name, "/root", fs, flags, data);
88869+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
88870 if (err)
88871 return err;
88872
88873- sys_chdir("/root");
88874+ sys_chdir((const char __force_user *)"/root");
88875 s = current->fs->pwd.dentry->d_sb;
88876 ROOT_DEV = s->s_dev;
88877 printk(KERN_INFO
88878@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
88879 va_start(args, fmt);
88880 vsprintf(buf, fmt, args);
88881 va_end(args);
88882- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
88883+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
88884 if (fd >= 0) {
88885 sys_ioctl(fd, FDEJECT, 0);
88886 sys_close(fd);
88887 }
88888 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
88889- fd = sys_open("/dev/console", O_RDWR, 0);
88890+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
88891 if (fd >= 0) {
88892 sys_ioctl(fd, TCGETS, (long)&termios);
88893 termios.c_lflag &= ~ICANON;
88894 sys_ioctl(fd, TCSETSF, (long)&termios);
88895- sys_read(fd, &c, 1);
88896+ sys_read(fd, (char __user *)&c, 1);
88897 termios.c_lflag |= ICANON;
88898 sys_ioctl(fd, TCSETSF, (long)&termios);
88899 sys_close(fd);
88900@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
88901 mount_root();
88902 out:
88903 devtmpfs_mount("dev");
88904- sys_mount(".", "/", NULL, MS_MOVE, NULL);
88905- sys_chroot(".");
88906+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
88907+ sys_chroot((const char __force_user *)".");
88908 }
88909
88910 static bool is_tmpfs;
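
All of these casts feed kernel pointers to syscalls whose prototypes take __user pointers, which is legitimate this early in boot, before any user address space exists. __force_user is sparse notation: __user places a pointer in a separate address space and __force authorizes the cast, so the checker stays quiet here without weakening checking elsewhere. A self-contained sketch of the annotation machinery (fake_sys_chdir is a stub, not the real syscall):

/* Under sparse (__CHECKER__ defined) the attributes below are real;
 * under plain gcc they expand to nothing, so this runs as plain C. */
#include <stdio.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

static long fake_sys_chdir(const char __user *path)
{
    /* a real syscall would copy_from_user() before touching it */
    printf("chdir(\"%s\")\n", (const char __force *)path);
    return 0;
}

int main(void)
{
    /* a kernel string handed to a __user-typed parameter: legitimate
     * only before userspace exists, hence the loud __force_user cast */
    return (int)fake_sys_chdir((const char __force_user *)"/root");
}
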
88911diff --git a/init/do_mounts.h b/init/do_mounts.h
88912index f5b978a..69dbfe8 100644
88913--- a/init/do_mounts.h
88914+++ b/init/do_mounts.h
88915@@ -15,15 +15,15 @@ extern int root_mountflags;
88916
88917 static inline int create_dev(char *name, dev_t dev)
88918 {
88919- sys_unlink(name);
88920- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
88921+ sys_unlink((char __force_user *)name);
88922+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
88923 }
88924
88925 #if BITS_PER_LONG == 32
88926 static inline u32 bstat(char *name)
88927 {
88928 struct stat64 stat;
88929- if (sys_stat64(name, &stat) != 0)
88930+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
88931 return 0;
88932 if (!S_ISBLK(stat.st_mode))
88933 return 0;
88934@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
88935 static inline u32 bstat(char *name)
88936 {
88937 struct stat stat;
88938- if (sys_newstat(name, &stat) != 0)
88939+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
88940 return 0;
88941 if (!S_ISBLK(stat.st_mode))
88942 return 0;
88943diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
88944index 3e0878e..8a9d7a0 100644
88945--- a/init/do_mounts_initrd.c
88946+++ b/init/do_mounts_initrd.c
88947@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
88948 {
88949 sys_unshare(CLONE_FS | CLONE_FILES);
88950 /* stdin/stdout/stderr for /linuxrc */
88951- sys_open("/dev/console", O_RDWR, 0);
88952+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
88953 sys_dup(0);
88954 sys_dup(0);
88955 /* move initrd over / and chdir/chroot in initrd root */
88956- sys_chdir("/root");
88957- sys_mount(".", "/", NULL, MS_MOVE, NULL);
88958- sys_chroot(".");
88959+ sys_chdir((const char __force_user *)"/root");
88960+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
88961+ sys_chroot((const char __force_user *)".");
88962 sys_setsid();
88963 return 0;
88964 }
88965@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
88966 create_dev("/dev/root.old", Root_RAM0);
88967 /* mount initrd on rootfs' /root */
88968 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
88969- sys_mkdir("/old", 0700);
88970- sys_chdir("/old");
88971+ sys_mkdir((const char __force_user *)"/old", 0700);
88972+ sys_chdir((const char __force_user *)"/old");
88973
88974 /* try loading default modules from initrd */
88975 load_default_modules();
88976@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
88977 current->flags &= ~PF_FREEZER_SKIP;
88978
88979 /* move initrd to rootfs' /old */
88980- sys_mount("..", ".", NULL, MS_MOVE, NULL);
88981+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
88982 /* switch root and cwd back to / of rootfs */
88983- sys_chroot("..");
88984+ sys_chroot((const char __force_user *)"..");
88985
88986 if (new_decode_dev(real_root_dev) == Root_RAM0) {
88987- sys_chdir("/old");
88988+ sys_chdir((const char __force_user *)"/old");
88989 return;
88990 }
88991
88992- sys_chdir("/");
88993+ sys_chdir((const char __force_user *)"/");
88994 ROOT_DEV = new_decode_dev(real_root_dev);
88995 mount_root();
88996
88997 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
88998- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
88999+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89000 if (!error)
89001 printk("okay\n");
89002 else {
89003- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89004+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89005 if (error == -ENOENT)
89006 printk("/initrd does not exist. Ignored.\n");
89007 else
89008 printk("failed\n");
89009 printk(KERN_NOTICE "Unmounting old root\n");
89010- sys_umount("/old", MNT_DETACH);
89011+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89012 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89013 if (fd < 0) {
89014 error = fd;
89015@@ -127,11 +127,11 @@ int __init initrd_load(void)
89016 * mounted in the normal path.
89017 */
89018 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89019- sys_unlink("/initrd.image");
89020+ sys_unlink((const char __force_user *)"/initrd.image");
89021 handle_initrd();
89022 return 1;
89023 }
89024 }
89025- sys_unlink("/initrd.image");
89026+ sys_unlink((const char __force_user *)"/initrd.image");
89027 return 0;
89028 }
89029diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89030index 8cb6db5..d729f50 100644
89031--- a/init/do_mounts_md.c
89032+++ b/init/do_mounts_md.c
89033@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
89034 partitioned ? "_d" : "", minor,
89035 md_setup_args[ent].device_names);
89036
89037- fd = sys_open(name, 0, 0);
89038+ fd = sys_open((char __force_user *)name, 0, 0);
89039 if (fd < 0) {
89040 printk(KERN_ERR "md: open failed - cannot start "
89041 "array %s\n", name);
89042@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
89043 * array without it
89044 */
89045 sys_close(fd);
89046- fd = sys_open(name, 0, 0);
89047+ fd = sys_open((char __force_user *)name, 0, 0);
89048 sys_ioctl(fd, BLKRRPART, 0);
89049 }
89050 sys_close(fd);
89051@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
89052
89053 wait_for_device_probe();
89054
89055- fd = sys_open("/dev/md0", 0, 0);
89056+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
89057 if (fd >= 0) {
89058 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89059 sys_close(fd);
89060diff --git a/init/init_task.c b/init/init_task.c
89061index ba0a7f36..2bcf1d5 100644
89062--- a/init/init_task.c
89063+++ b/init/init_task.c
89064@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
89065 * Initial thread structure. Alignment of this is handled by a special
89066 * linker map entry.
89067 */
89068+#ifdef CONFIG_X86
89069+union thread_union init_thread_union __init_task_data;
89070+#else
89071 union thread_union init_thread_union __init_task_data =
89072 { INIT_THREAD_INFO(init_task) };
89073+#endif
89074diff --git a/init/initramfs.c b/init/initramfs.c
89075index a8497fa..35b3c90 100644
89076--- a/init/initramfs.c
89077+++ b/init/initramfs.c
89078@@ -84,7 +84,7 @@ static void __init free_hash(void)
89079 }
89080 }
89081
89082-static long __init do_utime(char *filename, time_t mtime)
89083+static long __init do_utime(char __force_user *filename, time_t mtime)
89084 {
89085 struct timespec t[2];
89086
89087@@ -119,7 +119,7 @@ static void __init dir_utime(void)
89088 struct dir_entry *de, *tmp;
89089 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89090 list_del(&de->list);
89091- do_utime(de->name, de->mtime);
89092+ do_utime((char __force_user *)de->name, de->mtime);
89093 kfree(de->name);
89094 kfree(de);
89095 }
89096@@ -281,7 +281,7 @@ static int __init maybe_link(void)
89097 if (nlink >= 2) {
89098 char *old = find_link(major, minor, ino, mode, collected);
89099 if (old)
89100- return (sys_link(old, collected) < 0) ? -1 : 1;
89101+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89102 }
89103 return 0;
89104 }
89105@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
89106 {
89107 struct stat st;
89108
89109- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
89110+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
89111 if (S_ISDIR(st.st_mode))
89112- sys_rmdir(path);
89113+ sys_rmdir((char __force_user *)path);
89114 else
89115- sys_unlink(path);
89116+ sys_unlink((char __force_user *)path);
89117 }
89118 }
89119
89120@@ -315,7 +315,7 @@ static int __init do_name(void)
89121 int openflags = O_WRONLY|O_CREAT;
89122 if (ml != 1)
89123 openflags |= O_TRUNC;
89124- wfd = sys_open(collected, openflags, mode);
89125+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89126
89127 if (wfd >= 0) {
89128 sys_fchown(wfd, uid, gid);
89129@@ -327,17 +327,17 @@ static int __init do_name(void)
89130 }
89131 }
89132 } else if (S_ISDIR(mode)) {
89133- sys_mkdir(collected, mode);
89134- sys_chown(collected, uid, gid);
89135- sys_chmod(collected, mode);
89136+ sys_mkdir((char __force_user *)collected, mode);
89137+ sys_chown((char __force_user *)collected, uid, gid);
89138+ sys_chmod((char __force_user *)collected, mode);
89139 dir_add(collected, mtime);
89140 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89141 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89142 if (maybe_link() == 0) {
89143- sys_mknod(collected, mode, rdev);
89144- sys_chown(collected, uid, gid);
89145- sys_chmod(collected, mode);
89146- do_utime(collected, mtime);
89147+ sys_mknod((char __force_user *)collected, mode, rdev);
89148+ sys_chown((char __force_user *)collected, uid, gid);
89149+ sys_chmod((char __force_user *)collected, mode);
89150+ do_utime((char __force_user *)collected, mtime);
89151 }
89152 }
89153 return 0;
89154@@ -346,15 +346,15 @@ static int __init do_name(void)
89155 static int __init do_copy(void)
89156 {
89157 if (count >= body_len) {
89158- sys_write(wfd, victim, body_len);
89159+ sys_write(wfd, (char __force_user *)victim, body_len);
89160 sys_close(wfd);
89161- do_utime(vcollected, mtime);
89162+ do_utime((char __force_user *)vcollected, mtime);
89163 kfree(vcollected);
89164 eat(body_len);
89165 state = SkipIt;
89166 return 0;
89167 } else {
89168- sys_write(wfd, victim, count);
89169+ sys_write(wfd, (char __force_user *)victim, count);
89170 body_len -= count;
89171 eat(count);
89172 return 1;
89173@@ -365,9 +365,9 @@ static int __init do_symlink(void)
89174 {
89175 collected[N_ALIGN(name_len) + body_len] = '\0';
89176 clean_path(collected, 0);
89177- sys_symlink(collected + N_ALIGN(name_len), collected);
89178- sys_lchown(collected, uid, gid);
89179- do_utime(collected, mtime);
89180+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
89181+ sys_lchown((char __force_user *)collected, uid, gid);
89182+ do_utime((char __force_user *)collected, mtime);
89183 state = SkipIt;
89184 next_state = Reset;
89185 return 0;
89186diff --git a/init/main.c b/init/main.c
89187index e8ae1fe..f60f98c 100644
89188--- a/init/main.c
89189+++ b/init/main.c
89190@@ -98,6 +98,8 @@ extern void radix_tree_init(void);
89191 static inline void mark_rodata_ro(void) { }
89192 #endif
89193
89194+extern void grsecurity_init(void);
89195+
89196 /*
89197 * Debug helper: via this flag we know that we are in 'early bootup code'
89198 * where only the boot processor is running with IRQ disabled. This means
89199@@ -159,6 +161,75 @@ static int __init set_reset_devices(char *str)
89200
89201 __setup("reset_devices", set_reset_devices);
89202
89203+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
89204+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
89205+static int __init setup_grsec_proc_gid(char *str)
89206+{
89207+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
89208+ return 1;
89209+}
89210+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
89211+#endif
89212+
89213+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
89214+unsigned long pax_user_shadow_base __read_only;
89215+EXPORT_SYMBOL(pax_user_shadow_base);
89216+extern char pax_enter_kernel_user[];
89217+extern char pax_exit_kernel_user[];
89218+#endif
89219+
89220+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
89221+static int __init setup_pax_nouderef(char *str)
89222+{
89223+#ifdef CONFIG_X86_32
89224+ unsigned int cpu;
89225+ struct desc_struct *gdt;
89226+
89227+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
89228+ gdt = get_cpu_gdt_table(cpu);
89229+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
89230+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
89231+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
89232+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
89233+ }
89234+ loadsegment(ds, __KERNEL_DS);
89235+ loadsegment(es, __KERNEL_DS);
89236+ loadsegment(ss, __KERNEL_DS);
89237+#else
89238+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
89239+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
89240+ clone_pgd_mask = ~(pgdval_t)0UL;
89241+ pax_user_shadow_base = 0UL;
89242+ setup_clear_cpu_cap(X86_FEATURE_PCID);
89243+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
89244+#endif
89245+
89246+ return 0;
89247+}
89248+early_param("pax_nouderef", setup_pax_nouderef);
89249+
89250+#ifdef CONFIG_X86_64
89251+static int __init setup_pax_weakuderef(char *str)
89252+{
89253+ if (clone_pgd_mask != ~(pgdval_t)0UL)
89254+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
89255+ return 1;
89256+}
89257+__setup("pax_weakuderef", setup_pax_weakuderef);
89258+#endif
89259+#endif
89260+
89261+#ifdef CONFIG_PAX_SOFTMODE
89262+int pax_softmode;
89263+
89264+static int __init setup_pax_softmode(char *str)
89265+{
89266+ get_option(&str, &pax_softmode);
89267+ return 1;
89268+}
89269+__setup("pax_softmode=", setup_pax_softmode);
89270+#endif
89271+
89272 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
89273 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
89274 static const char *panic_later, *panic_param;
89275@@ -727,7 +798,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
89276 struct blacklist_entry *entry;
89277 char *fn_name;
89278
89279- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
89280+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
89281 if (!fn_name)
89282 return false;
89283
89284@@ -779,7 +850,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
89285 {
89286 int count = preempt_count();
89287 int ret;
89288- char msgbuf[64];
89289+ const char *msg1 = "", *msg2 = "";
89290
89291 if (initcall_blacklisted(fn))
89292 return -EPERM;
89293@@ -789,18 +860,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
89294 else
89295 ret = fn();
89296
89297- msgbuf[0] = 0;
89298-
89299 if (preempt_count() != count) {
89300- sprintf(msgbuf, "preemption imbalance ");
89301+ msg1 = " preemption imbalance";
89302 preempt_count_set(count);
89303 }
89304 if (irqs_disabled()) {
89305- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
89306+ msg2 = " disabled interrupts";
89307 local_irq_enable();
89308 }
89309- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
89310+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
89311
89312+ add_latent_entropy();
89313 return ret;
89314 }
89315
89316@@ -907,8 +977,8 @@ static int run_init_process(const char *init_filename)
89317 {
89318 argv_init[0] = init_filename;
89319 return do_execve(getname_kernel(init_filename),
89320- (const char __user *const __user *)argv_init,
89321- (const char __user *const __user *)envp_init);
89322+ (const char __user *const __force_user *)argv_init,
89323+ (const char __user *const __force_user *)envp_init);
89324 }
89325
89326 static int try_to_run_init_process(const char *init_filename)
89327@@ -925,6 +995,10 @@ static int try_to_run_init_process(const char *init_filename)
89328 return ret;
89329 }
89330
89331+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89332+extern int gr_init_ran;
89333+#endif
89334+
89335 static noinline void __init kernel_init_freeable(void);
89336
89337 static int __ref kernel_init(void *unused)
89338@@ -949,6 +1023,11 @@ static int __ref kernel_init(void *unused)
89339 ramdisk_execute_command, ret);
89340 }
89341
89342+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89343+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
89344+ gr_init_ran = 1;
89345+#endif
89346+
89347 /*
89348 * We try each of these until one succeeds.
89349 *
89350@@ -1004,7 +1083,7 @@ static noinline void __init kernel_init_freeable(void)
89351 do_basic_setup();
89352
89353 /* Open the /dev/console on the rootfs, this should never fail */
89354- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
89355+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
89356 pr_err("Warning: unable to open an initial console.\n");
89357
89358 (void) sys_dup(0);
89359@@ -1017,11 +1096,13 @@ static noinline void __init kernel_init_freeable(void)
89360 if (!ramdisk_execute_command)
89361 ramdisk_execute_command = "/init";
89362
89363- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
89364+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
89365 ramdisk_execute_command = NULL;
89366 prepare_namespace();
89367 }
89368
89369+ grsecurity_init();
89370+
89371 /*
89372 * Ok, we have completed the initial bootup, and
89373 * we're essentially up and running. Get rid of the
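
The new pax_softmode=, grsec_proc_gid= and pax_nouderef handlers above all hang off the kernel's __setup()/early_param() tables: each registers a prefix and a callback that parses the remainder of the matching boot-command-line token. A userspace sketch of that dispatch pattern; setup_table and parse_cmdline mimic the mechanism and are not the real kernel API:

/* Minimal "name=" prefix dispatch over a boot command line. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int pax_softmode;

static int setup_pax_softmode(const char *str)
{
    pax_softmode = atoi(str);            /* stands in for get_option() */
    return 1;
}

struct setup_entry {
    const char *prefix;
    int (*fn)(const char *);
};

static const struct setup_entry setup_table[] = {
    { "pax_softmode=", setup_pax_softmode },
};

static void parse_cmdline(char *cmdline)
{
    for (char *tok = strtok(cmdline, " "); tok; tok = strtok(NULL, " ")) {
        for (size_t i = 0; i < sizeof(setup_table) / sizeof(setup_table[0]); i++) {
            size_t n = strlen(setup_table[i].prefix);
            if (strncmp(tok, setup_table[i].prefix, n) == 0)
                setup_table[i].fn(tok + n);
        }
    }
}

int main(void)
{
    char cmdline[] = "root=/dev/sda1 pax_softmode=1 quiet";
    parse_cmdline(cmdline);
    printf("pax_softmode = %d\n", pax_softmode);   /* prints 1 */
    return 0;
}
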
89374diff --git a/ipc/compat.c b/ipc/compat.c
89375index b5ef4f7..ff31d87 100644
89376--- a/ipc/compat.c
89377+++ b/ipc/compat.c
89378@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
89379 COMPAT_SHMLBA);
89380 if (err < 0)
89381 return err;
89382- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
89383+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
89384 }
89385 case SHMDT:
89386 return sys_shmdt(compat_ptr(ptr));
89387diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
89388index c3f0326..d4e0579 100644
89389--- a/ipc/ipc_sysctl.c
89390+++ b/ipc/ipc_sysctl.c
89391@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
89392 static int proc_ipc_dointvec(struct ctl_table *table, int write,
89393 void __user *buffer, size_t *lenp, loff_t *ppos)
89394 {
89395- struct ctl_table ipc_table;
89396+ ctl_table_no_const ipc_table;
89397
89398 memcpy(&ipc_table, table, sizeof(ipc_table));
89399 ipc_table.data = get_ipc(table);
89400@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
89401 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
89402 void __user *buffer, size_t *lenp, loff_t *ppos)
89403 {
89404- struct ctl_table ipc_table;
89405+ ctl_table_no_const ipc_table;
89406
89407 memcpy(&ipc_table, table, sizeof(ipc_table));
89408 ipc_table.data = get_ipc(table);
89409@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
89410 static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
89411 void __user *buffer, size_t *lenp, loff_t *ppos)
89412 {
89413- struct ctl_table ipc_table;
89414+ ctl_table_no_const ipc_table;
89415 size_t lenp_bef = *lenp;
89416 int rc;
89417
89418@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
89419 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
89420 void __user *buffer, size_t *lenp, loff_t *ppos)
89421 {
89422- struct ctl_table ipc_table;
89423+ ctl_table_no_const ipc_table;
89424 memcpy(&ipc_table, table, sizeof(ipc_table));
89425 ipc_table.data = get_ipc(table);
89426
89427@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
89428 static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
89429 void __user *buffer, size_t *lenp, loff_t *ppos)
89430 {
89431- struct ctl_table ipc_table;
89432+ ctl_table_no_const ipc_table;
89433 size_t lenp_bef = *lenp;
89434 int oldval;
89435 int rc;
89436diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
89437index 68d4e95..1477ded 100644
89438--- a/ipc/mq_sysctl.c
89439+++ b/ipc/mq_sysctl.c
89440@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
89441 static int proc_mq_dointvec(struct ctl_table *table, int write,
89442 void __user *buffer, size_t *lenp, loff_t *ppos)
89443 {
89444- struct ctl_table mq_table;
89445+ ctl_table_no_const mq_table;
89446 memcpy(&mq_table, table, sizeof(mq_table));
89447 mq_table.data = get_mq(table);
89448
89449@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
89450 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
89451 void __user *buffer, size_t *lenp, loff_t *ppos)
89452 {
89453- struct ctl_table mq_table;
89454+ ctl_table_no_const mq_table;
89455 memcpy(&mq_table, table, sizeof(mq_table));
89456 mq_table.data = get_mq(table);
89457
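
ctl_table_no_const exists because the constify GCC plugin makes file-scope struct ctl_table instances read-only after init; these handlers need a mutable stack copy so that ->data can be re-pointed at the per-namespace value before the generic proc_do* helper runs. A plain-C sketch of the copy-then-repoint pattern (the struct layout here is trimmed for illustration):

/* The file-scope table is read-only, so the handler clones the entry
 * into a never-constified local type before changing ->data. */
#include <stdio.h>
#include <string.h>

struct ctl_table {
    const char *procname;
    void *data;
};
typedef struct ctl_table ctl_table_no_const;   /* same layout, writable */

static int shmmax_ns = 4096;                   /* per-namespace value */

static void proc_ipc_handler(const struct ctl_table *table)
{
    ctl_table_no_const ipc_table;

    memcpy(&ipc_table, table, sizeof(ipc_table));
    ipc_table.data = &shmmax_ns;               /* mutate only the copy */
    printf("%s -> %d\n", ipc_table.procname, *(int *)ipc_table.data);
}

int main(void)
{
    static const struct ctl_table table = { "shmmax", NULL };
    proc_ipc_handler(&table);
    return 0;
}
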
89458diff --git a/ipc/mqueue.c b/ipc/mqueue.c
89459index 4fcf39a..d3cc2ec 100644
89460--- a/ipc/mqueue.c
89461+++ b/ipc/mqueue.c
89462@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
89463 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
89464 info->attr.mq_msgsize);
89465
89466+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
89467 spin_lock(&mq_lock);
89468 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
89469 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
89470diff --git a/ipc/shm.c b/ipc/shm.c
89471index 89fc354..cf56786 100644
89472--- a/ipc/shm.c
89473+++ b/ipc/shm.c
89474@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
89475 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
89476 #endif
89477
89478+#ifdef CONFIG_GRKERNSEC
89479+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89480+ const time_t shm_createtime, const kuid_t cuid,
89481+ const int shmid);
89482+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89483+ const time_t shm_createtime);
89484+#endif
89485+
89486 void shm_init_ns(struct ipc_namespace *ns)
89487 {
89488 ns->shm_ctlmax = SHMMAX;
89489@@ -557,6 +565,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
89490 shp->shm_lprid = 0;
89491 shp->shm_atim = shp->shm_dtim = 0;
89492 shp->shm_ctim = get_seconds();
89493+#ifdef CONFIG_GRKERNSEC
89494+ {
89495+ struct timespec timeval;
89496+ do_posix_clock_monotonic_gettime(&timeval);
89497+
89498+ shp->shm_createtime = timeval.tv_sec;
89499+ }
89500+#endif
89501 shp->shm_segsz = size;
89502 shp->shm_nattch = 0;
89503 shp->shm_file = file;
89504@@ -1092,6 +1108,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89505 f_mode = FMODE_READ | FMODE_WRITE;
89506 }
89507 if (shmflg & SHM_EXEC) {
89508+
89509+#ifdef CONFIG_PAX_MPROTECT
89510+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
89511+ goto out;
89512+#endif
89513+
89514 prot |= PROT_EXEC;
89515 acc_mode |= S_IXUGO;
89516 }
89517@@ -1116,6 +1138,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89518 if (err)
89519 goto out_unlock;
89520
89521+#ifdef CONFIG_GRKERNSEC
89522+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
89523+ shp->shm_perm.cuid, shmid) ||
89524+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
89525+ err = -EACCES;
89526+ goto out_unlock;
89527+ }
89528+#endif
89529+
89530 ipc_lock_object(&shp->shm_perm);
89531
89532 /* check if shm_destroy() is tearing down shp */
89533@@ -1128,6 +1159,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89534 path = shp->shm_file->f_path;
89535 path_get(&path);
89536 shp->shm_nattch++;
89537+#ifdef CONFIG_GRKERNSEC
89538+ shp->shm_lapid = current->pid;
89539+#endif
89540 size = i_size_read(path.dentry->d_inode);
89541 ipc_unlock_object(&shp->shm_perm);
89542 rcu_read_unlock();
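
Besides the grsec shmat audit hooks, the SHM_EXEC hunk above refuses executable attaches outright for tasks running with MF_PAX_MPROTECT, since an executable SysV segment would sidestep the W^X policy. A userspace probe for that behavior; on a kernel enforcing MPROTECT for this task the attach is expected to fail with EACCES, while a stock kernel grants it:

/* Request a read/write/exec attach of a fresh SysV segment. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_EXEC
# define SHM_EXEC 0100000    /* from <linux/shm.h> */
#endif

int main(void)
{
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    if (id < 0) { perror("shmget"); return 1; }

    void *p = shmat(id, NULL, SHM_EXEC);       /* asks for PROT_EXEC too */
    if (p == (void *)-1)
        perror("shmat(SHM_EXEC)");             /* expect EACCES if denied */
    else
        shmdt(p);

    shmctl(id, IPC_RMID, NULL);
    return 0;
}
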
89543diff --git a/ipc/util.c b/ipc/util.c
89544index 27d74e6..8be0be2 100644
89545--- a/ipc/util.c
89546+++ b/ipc/util.c
89547@@ -71,6 +71,8 @@ struct ipc_proc_iface {
89548 int (*show)(struct seq_file *, void *);
89549 };
89550
89551+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
89552+
89553 static void ipc_memory_notifier(struct work_struct *work)
89554 {
89555 ipcns_notify(IPCNS_MEMCHANGED);
89556@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
89557 granted_mode >>= 6;
89558 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
89559 granted_mode >>= 3;
89560+
89561+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
89562+ return -1;
89563+
89564 /* is there some bit set in requested_mode but not in granted_mode? */
89565 if ((requested_mode & ~granted_mode & 0007) &&
89566 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
89567diff --git a/kernel/acct.c b/kernel/acct.c
89568index 808a86f..da69695 100644
89569--- a/kernel/acct.c
89570+++ b/kernel/acct.c
89571@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
89572 */
89573 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
89574 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
89575- file->f_op->write(file, (char *)&ac,
89576+ file->f_op->write(file, (char __force_user *)&ac,
89577 sizeof(acct_t), &file->f_pos);
89578 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
89579 set_fs(fs);
89580diff --git a/kernel/audit.c b/kernel/audit.c
89581index 3ef2e0e..8873765 100644
89582--- a/kernel/audit.c
89583+++ b/kernel/audit.c
89584@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
89585 3) suppressed due to audit_rate_limit
89586 4) suppressed due to audit_backlog_limit
89587 */
89588-static atomic_t audit_lost = ATOMIC_INIT(0);
89589+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
89590
89591 /* The netlink socket. */
89592 static struct sock *audit_sock;
89593@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
89594 unsigned long now;
89595 int print;
89596
89597- atomic_inc(&audit_lost);
89598+ atomic_inc_unchecked(&audit_lost);
89599
89600 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
89601
89602@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
89603 if (print) {
89604 if (printk_ratelimit())
89605 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
89606- atomic_read(&audit_lost),
89607+ atomic_read_unchecked(&audit_lost),
89608 audit_rate_limit,
89609 audit_backlog_limit);
89610 audit_panic(message);
89611@@ -840,7 +840,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
89612 s.pid = audit_pid;
89613 s.rate_limit = audit_rate_limit;
89614 s.backlog_limit = audit_backlog_limit;
89615- s.lost = atomic_read(&audit_lost);
89616+ s.lost = atomic_read_unchecked(&audit_lost);
89617 s.backlog = skb_queue_len(&audit_skb_queue);
89618 s.version = AUDIT_VERSION_LATEST;
89619 s.backlog_wait_time = audit_backlog_wait_time;
89620diff --git a/kernel/auditsc.c b/kernel/auditsc.c
89621index 21eae3c..66db239 100644
89622--- a/kernel/auditsc.c
89623+++ b/kernel/auditsc.c
89624@@ -2023,7 +2023,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
89625 }
89626
89627 /* global counter which is incremented every time something logs in */
89628-static atomic_t session_id = ATOMIC_INIT(0);
89629+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
89630
89631 static int audit_set_loginuid_perm(kuid_t loginuid)
89632 {
89633@@ -2090,7 +2090,7 @@ int audit_set_loginuid(kuid_t loginuid)
89634
89635 /* are we setting or clearing? */
89636 if (uid_valid(loginuid))
89637- sessionid = (unsigned int)atomic_inc_return(&session_id);
89638+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
89639
89640 task->sessionid = sessionid;
89641 task->loginuid = loginuid;
89642diff --git a/kernel/capability.c b/kernel/capability.c
89643index a5cf13c..07a2647 100644
89644--- a/kernel/capability.c
89645+++ b/kernel/capability.c
89646@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
89647 * before modification is attempted and the application
89648 * fails.
89649 */
89650+ if (tocopy > ARRAY_SIZE(kdata))
89651+ return -EFAULT;
89652+
89653 if (copy_to_user(dataptr, kdata, tocopy
89654 * sizeof(struct __user_cap_data_struct))) {
89655 return -EFAULT;
89656@@ -293,10 +296,11 @@ bool has_ns_capability(struct task_struct *t,
89657 int ret;
89658
89659 rcu_read_lock();
89660- ret = security_capable(__task_cred(t), ns, cap);
89661+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
89662+ gr_task_is_capable(t, __task_cred(t), cap);
89663 rcu_read_unlock();
89664
89665- return (ret == 0);
89666+ return ret;
89667 }
89668
89669 /**
89670@@ -333,10 +337,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
89671 int ret;
89672
89673 rcu_read_lock();
89674- ret = security_capable_noaudit(__task_cred(t), ns, cap);
89675+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
89676 rcu_read_unlock();
89677
89678- return (ret == 0);
89679+ return ret;
89680 }
89681
89682 /**
89683@@ -374,7 +378,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
89684 BUG();
89685 }
89686
89687- if (security_capable(current_cred(), ns, cap) == 0) {
89688+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
89689 current->flags |= PF_SUPERPRIV;
89690 return true;
89691 }
89692@@ -382,6 +386,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
89693 }
89694 EXPORT_SYMBOL(ns_capable);
89695
89696+bool ns_capable_nolog(struct user_namespace *ns, int cap)
89697+{
89698+ if (unlikely(!cap_valid(cap))) {
89699+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
89700+ BUG();
89701+ }
89702+
89703+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
89704+ current->flags |= PF_SUPERPRIV;
89705+ return true;
89706+ }
89707+ return false;
89708+}
89709+EXPORT_SYMBOL(ns_capable_nolog);
89710+
89711 /**
89712 * file_ns_capable - Determine if the file's opener had a capability in effect
89713 * @file: The file we want to check
89714@@ -423,6 +442,12 @@ bool capable(int cap)
89715 }
89716 EXPORT_SYMBOL(capable);
89717
89718+bool capable_nolog(int cap)
89719+{
89720+ return ns_capable_nolog(&init_user_ns, cap);
89721+}
89722+EXPORT_SYMBOL(capable_nolog);
89723+
89724 /**
89725 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
89726 * @inode: The inode in question
89727@@ -440,3 +465,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
89728 kgid_has_mapping(ns, inode->i_gid);
89729 }
89730 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
89731+
89732+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
89733+{
89734+ struct user_namespace *ns = current_user_ns();
89735+
89736+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
89737+ kgid_has_mapping(ns, inode->i_gid);
89738+}
89739+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
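
The capget() change is a classic bounds check: tocopy is derived from a caller-supplied header version, and without the clamp a larger value would copy stack memory beyond kdata[] out to userspace. A userspace analogue of the pattern (struct cap_data and copy_caps are illustrative stand-ins, not kernel structures):

/* A caller-derived element count must be checked against the real
 * array size before it drives a bulk copy. */
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned effective, permitted, inheritable; };

static int copy_caps(struct cap_data *dst, unsigned tocopy)
{
    struct cap_data kdata[2] = { { 1, 2, 3 }, { 4, 5, 6 } };

    if (tocopy > ARRAY_SIZE(kdata))     /* the check the hunk adds */
        return -1;                      /* -EFAULT in the kernel */
    memcpy(dst, kdata, tocopy * sizeof(*kdata));
    return 0;
}

int main(void)
{
    struct cap_data out[2];
    printf("tocopy=2: %d\n", copy_caps(out, 2));   /* accepted */
    printf("tocopy=9: %d\n", copy_caps(out, 9));   /* rejected */
    return 0;
}
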
89740diff --git a/kernel/cgroup.c b/kernel/cgroup.c
89741index 70776ae..09c4988 100644
89742--- a/kernel/cgroup.c
89743+++ b/kernel/cgroup.c
89744@@ -5146,6 +5146,14 @@ static void cgroup_release_agent(struct work_struct *work)
89745 release_list);
89746 list_del_init(&cgrp->release_list);
89747 raw_spin_unlock(&release_list_lock);
89748+
89749+ /*
89750+ * don't bother calling call_usermodehelper if we haven't
89751+ * configured a binary to execute
89752+ */
89753+ if (cgrp->root->release_agent_path[0] == '\0')
89754+ goto continue_free;
89755+
89756 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
89757 if (!pathbuf)
89758 goto continue_free;
89759@@ -5336,7 +5344,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
89760 struct task_struct *task;
89761 int count = 0;
89762
89763- seq_printf(seq, "css_set %p\n", cset);
89764+ seq_printf(seq, "css_set %pK\n", cset);
89765
89766 list_for_each_entry(task, &cset->tasks, cg_list) {
89767 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
89768diff --git a/kernel/compat.c b/kernel/compat.c
89769index 633394f..bdfa969 100644
89770--- a/kernel/compat.c
89771+++ b/kernel/compat.c
89772@@ -13,6 +13,7 @@
89773
89774 #include <linux/linkage.h>
89775 #include <linux/compat.h>
89776+#include <linux/module.h>
89777 #include <linux/errno.h>
89778 #include <linux/time.h>
89779 #include <linux/signal.h>
89780@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
89781 mm_segment_t oldfs;
89782 long ret;
89783
89784- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
89785+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
89786 oldfs = get_fs();
89787 set_fs(KERNEL_DS);
89788 ret = hrtimer_nanosleep_restart(restart);
89789@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
89790 oldfs = get_fs();
89791 set_fs(KERNEL_DS);
89792 ret = hrtimer_nanosleep(&tu,
89793- rmtp ? (struct timespec __user *)&rmt : NULL,
89794+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
89795 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
89796 set_fs(oldfs);
89797
89798@@ -361,7 +362,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
89799 mm_segment_t old_fs = get_fs();
89800
89801 set_fs(KERNEL_DS);
89802- ret = sys_sigpending((old_sigset_t __user *) &s);
89803+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
89804 set_fs(old_fs);
89805 if (ret == 0)
89806 ret = put_user(s, set);
89807@@ -451,7 +452,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
89808 mm_segment_t old_fs = get_fs();
89809
89810 set_fs(KERNEL_DS);
89811- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
89812+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
89813 set_fs(old_fs);
89814
89815 if (!ret) {
89816@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
89817 set_fs (KERNEL_DS);
89818 ret = sys_wait4(pid,
89819 (stat_addr ?
89820- (unsigned int __user *) &status : NULL),
89821- options, (struct rusage __user *) &r);
89822+ (unsigned int __force_user *) &status : NULL),
89823+ options, (struct rusage __force_user *) &r);
89824 set_fs (old_fs);
89825
89826 if (ret > 0) {
89827@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
89828 memset(&info, 0, sizeof(info));
89829
89830 set_fs(KERNEL_DS);
89831- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
89832- uru ? (struct rusage __user *)&ru : NULL);
89833+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
89834+ uru ? (struct rusage __force_user *)&ru : NULL);
89835 set_fs(old_fs);
89836
89837 if ((ret < 0) || (info.si_signo == 0))
89838@@ -695,8 +696,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
89839 oldfs = get_fs();
89840 set_fs(KERNEL_DS);
89841 err = sys_timer_settime(timer_id, flags,
89842- (struct itimerspec __user *) &newts,
89843- (struct itimerspec __user *) &oldts);
89844+ (struct itimerspec __force_user *) &newts,
89845+ (struct itimerspec __force_user *) &oldts);
89846 set_fs(oldfs);
89847 if (!err && old && put_compat_itimerspec(old, &oldts))
89848 return -EFAULT;
89849@@ -713,7 +714,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
89850 oldfs = get_fs();
89851 set_fs(KERNEL_DS);
89852 err = sys_timer_gettime(timer_id,
89853- (struct itimerspec __user *) &ts);
89854+ (struct itimerspec __force_user *) &ts);
89855 set_fs(oldfs);
89856 if (!err && put_compat_itimerspec(setting, &ts))
89857 return -EFAULT;
89858@@ -732,7 +733,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
89859 oldfs = get_fs();
89860 set_fs(KERNEL_DS);
89861 err = sys_clock_settime(which_clock,
89862- (struct timespec __user *) &ts);
89863+ (struct timespec __force_user *) &ts);
89864 set_fs(oldfs);
89865 return err;
89866 }
89867@@ -747,7 +748,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
89868 oldfs = get_fs();
89869 set_fs(KERNEL_DS);
89870 err = sys_clock_gettime(which_clock,
89871- (struct timespec __user *) &ts);
89872+ (struct timespec __force_user *) &ts);
89873 set_fs(oldfs);
89874 if (!err && compat_put_timespec(&ts, tp))
89875 return -EFAULT;
89876@@ -767,7 +768,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
89877
89878 oldfs = get_fs();
89879 set_fs(KERNEL_DS);
89880- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
89881+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
89882 set_fs(oldfs);
89883
89884 err = compat_put_timex(utp, &txc);
89885@@ -787,7 +788,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
89886 oldfs = get_fs();
89887 set_fs(KERNEL_DS);
89888 err = sys_clock_getres(which_clock,
89889- (struct timespec __user *) &ts);
89890+ (struct timespec __force_user *) &ts);
89891 set_fs(oldfs);
89892 if (!err && tp && compat_put_timespec(&ts, tp))
89893 return -EFAULT;
89894@@ -801,7 +802,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
89895 struct timespec tu;
89896 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
89897
89898- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
89899+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
89900 oldfs = get_fs();
89901 set_fs(KERNEL_DS);
89902 err = clock_nanosleep_restart(restart);
89903@@ -833,8 +834,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
89904 oldfs = get_fs();
89905 set_fs(KERNEL_DS);
89906 err = sys_clock_nanosleep(which_clock, flags,
89907- (struct timespec __user *) &in,
89908- (struct timespec __user *) &out);
89909+ (struct timespec __force_user *) &in,
89910+ (struct timespec __force_user *) &out);
89911 set_fs(oldfs);
89912
89913 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
89914@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
89915 mm_segment_t old_fs = get_fs();
89916
89917 set_fs(KERNEL_DS);
89918- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
89919+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
89920 set_fs(old_fs);
89921 if (compat_put_timespec(&t, interval))
89922 return -EFAULT;
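
Every hunk in this file repeats one idiom: widen the address limit with set_fs(KERNEL_DS), call the native syscall with a kernel buffer cast via __force_user, then restore the old limit. The casts only re-label the pointers for sparse; the set_fs() is what makes the kernel's access_ok() accept them. A runnable stand-in that models the idiom (set_fs/get_fs and the "syscall" below are mock-ups, not kernel interfaces):

/* Stand-in model of the compat set_fs() idiom. */
#include <stdio.h>

typedef int mm_segment_t;
#define USER_DS   0
#define KERNEL_DS 1

static mm_segment_t addr_limit = USER_DS;
static mm_segment_t get_fs(void) { return addr_limit; }
static void set_fs(mm_segment_t v) { addr_limit = v; }

struct ktimespec { long tv_sec, tv_nsec; };

/* the "native syscall": accepts a kernel pointer only while the
 * address limit is widened, mimicking access_ok() */
static long sys_clock_gettime_stub(struct ktimespec *tp)
{
    if (addr_limit != KERNEL_DS)
        return -14;                       /* -EFAULT */
    tp->tv_sec = 1; tp->tv_nsec = 0;
    return 0;
}

static long compat_clock_gettime(void)
{
    struct ktimespec ts;
    mm_segment_t oldfs = get_fs();
    long err;

    set_fs(KERNEL_DS);                    /* widen the limit ... */
    err = sys_clock_gettime_stub(&ts);    /* ... pass a kernel buffer */
    set_fs(oldfs);                        /* ... and always restore it */
    if (!err)
        printf("%ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
    return err;
}

int main(void) { return compat_clock_gettime() ? 1 : 0; }
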
89923diff --git a/kernel/configs.c b/kernel/configs.c
89924index c18b1f1..b9a0132 100644
89925--- a/kernel/configs.c
89926+++ b/kernel/configs.c
89927@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
89928 struct proc_dir_entry *entry;
89929
89930 /* create the current config file */
89931+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
89932+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
89933+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
89934+ &ikconfig_file_ops);
89935+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
89936+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
89937+ &ikconfig_file_ops);
89938+#endif
89939+#else
89940 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
89941 &ikconfig_file_ops);
89942+#endif
89943+
89944 if (!entry)
89945 return -ENOMEM;
89946
89947diff --git a/kernel/cred.c b/kernel/cred.c
89948index e0573a4..26c0fd3 100644
89949--- a/kernel/cred.c
89950+++ b/kernel/cred.c
89951@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
89952 validate_creds(cred);
89953 alter_cred_subscribers(cred, -1);
89954 put_cred(cred);
89955+
89956+#ifdef CONFIG_GRKERNSEC_SETXID
89957+ cred = (struct cred *) tsk->delayed_cred;
89958+ if (cred != NULL) {
89959+ tsk->delayed_cred = NULL;
89960+ validate_creds(cred);
89961+ alter_cred_subscribers(cred, -1);
89962+ put_cred(cred);
89963+ }
89964+#endif
89965 }
89966
89967 /**
89968@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
89969 * Always returns 0 thus allowing this function to be tail-called at the end
89970 * of, say, sys_setgid().
89971 */
89972-int commit_creds(struct cred *new)
89973+static int __commit_creds(struct cred *new)
89974 {
89975 struct task_struct *task = current;
89976 const struct cred *old = task->real_cred;
89977@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
89978
89979 get_cred(new); /* we will require a ref for the subj creds too */
89980
89981+ gr_set_role_label(task, new->uid, new->gid);
89982+
89983 /* dumpability changes */
89984 if (!uid_eq(old->euid, new->euid) ||
89985 !gid_eq(old->egid, new->egid) ||
89986@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
89987 put_cred(old);
89988 return 0;
89989 }
89990+#ifdef CONFIG_GRKERNSEC_SETXID
89991+extern int set_user(struct cred *new);
89992+
89993+void gr_delayed_cred_worker(void)
89994+{
89995+ const struct cred *new = current->delayed_cred;
89996+ struct cred *ncred;
89997+
89998+ current->delayed_cred = NULL;
89999+
90000+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
90001+ // from doing get_cred on it when queueing this
90002+ put_cred(new);
90003+ return;
90004+ } else if (new == NULL)
90005+ return;
90006+
90007+ ncred = prepare_creds();
90008+ if (!ncred)
90009+ goto die;
90010+ // uids
90011+ ncred->uid = new->uid;
90012+ ncred->euid = new->euid;
90013+ ncred->suid = new->suid;
90014+ ncred->fsuid = new->fsuid;
90015+ // gids
90016+ ncred->gid = new->gid;
90017+ ncred->egid = new->egid;
90018+ ncred->sgid = new->sgid;
90019+ ncred->fsgid = new->fsgid;
90020+ // groups
90021+ set_groups(ncred, new->group_info);
90022+ // caps
90023+ ncred->securebits = new->securebits;
90024+ ncred->cap_inheritable = new->cap_inheritable;
90025+ ncred->cap_permitted = new->cap_permitted;
90026+ ncred->cap_effective = new->cap_effective;
90027+ ncred->cap_bset = new->cap_bset;
90028+
90029+ if (set_user(ncred)) {
90030+ abort_creds(ncred);
90031+ goto die;
90032+ }
90033+
90034+ // from doing get_cred on it when queueing this
90035+ put_cred(new);
90036+
90037+ __commit_creds(ncred);
90038+ return;
90039+die:
90040+ // from doing get_cred on it when queueing this
90041+ put_cred(new);
90042+ do_group_exit(SIGKILL);
90043+}
90044+#endif
90045+
90046+int commit_creds(struct cred *new)
90047+{
90048+#ifdef CONFIG_GRKERNSEC_SETXID
90049+ int ret;
90050+ int schedule_it = 0;
90051+ struct task_struct *t;
90052+ unsigned oldsecurebits = current_cred()->securebits;
90053+
90054+ /* we won't get called with tasklist_lock held for writing
90055+ and interrupts disabled as the cred struct in that case is
90056+ init_cred
90057+ */
90058+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90059+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
90060+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
90061+ schedule_it = 1;
90062+ }
90063+ ret = __commit_creds(new);
90064+ if (schedule_it) {
90065+ rcu_read_lock();
90066+ read_lock(&tasklist_lock);
90067+ for (t = next_thread(current); t != current;
90068+ t = next_thread(t)) {
90069+ /* we'll check if the thread has uid 0 in
90070+ * the delayed worker routine
90071+ */
90072+ if (task_securebits(t) == oldsecurebits &&
90073+ t->delayed_cred == NULL) {
90074+ t->delayed_cred = get_cred(new);
90075+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
90076+ set_tsk_need_resched(t);
90077+ }
90078+ }
90079+ read_unlock(&tasklist_lock);
90080+ rcu_read_unlock();
90081+ }
90082+
90083+ return ret;
90084+#else
90085+ return __commit_creds(new);
90086+#endif
90087+}
90088+
90089 EXPORT_SYMBOL(commit_creds);
90090
90091 /**
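
The SETXID machinery exists because POSIX requires setuid() and friends to apply to the whole process, while the kernel credential model is per-thread; glibc papers over the gap by signalling every thread. The hunk enforces it in-kernel instead: commit_creds() queues the new cred on each sibling thread and flags TIF_GRSEC_SETXID so the delayed worker applies it on the next reschedule. A small userspace demo of the invariant (build with cc -pthread; 65534 as the "nobody" UID is an assumption of the demo):

/* After setuid(), every thread must observe the new UID. Run as root
 * to see the transition; unprivileged runs print the same UID twice. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
    (void)arg;
    sleep(1);                               /* let main() change UID first */
    printf("worker thread uid: %d\n", (int)getuid());
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, worker, NULL);
    if (setuid(65534) != 0)
        perror("setuid");
    printf("main thread uid:   %d\n", (int)getuid());
    pthread_join(&t, NULL);                 /* as root, both print 65534 */
    return 0;
}
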
90092diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
90093index 1adf62b..7736e06 100644
90094--- a/kernel/debug/debug_core.c
90095+++ b/kernel/debug/debug_core.c
90096@@ -124,7 +124,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
90097 */
90098 static atomic_t masters_in_kgdb;
90099 static atomic_t slaves_in_kgdb;
90100-static atomic_t kgdb_break_tasklet_var;
90101+static atomic_unchecked_t kgdb_break_tasklet_var;
90102 atomic_t kgdb_setting_breakpoint;
90103
90104 struct task_struct *kgdb_usethread;
90105@@ -134,7 +134,7 @@ int kgdb_single_step;
90106 static pid_t kgdb_sstep_pid;
90107
90108 /* to keep track of the CPU which is doing the single stepping*/
90109-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90110+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90111
90112 /*
90113 * If you are debugging a problem where roundup (the collection of
90114@@ -549,7 +549,7 @@ return_normal:
90115 * kernel will only try for the value of sstep_tries before
90116 * giving up and continuing on.
90117 */
90118- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
90119+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
90120 (kgdb_info[cpu].task &&
90121 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
90122 atomic_set(&kgdb_active, -1);
90123@@ -647,8 +647,8 @@ cpu_master_loop:
90124 }
90125
90126 kgdb_restore:
90127- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
90128- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
90129+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
90130+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
90131 if (kgdb_info[sstep_cpu].task)
90132 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
90133 else
90134@@ -925,18 +925,18 @@ static void kgdb_unregister_callbacks(void)
90135 static void kgdb_tasklet_bpt(unsigned long ing)
90136 {
90137 kgdb_breakpoint();
90138- atomic_set(&kgdb_break_tasklet_var, 0);
90139+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
90140 }
90141
90142 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
90143
90144 void kgdb_schedule_breakpoint(void)
90145 {
90146- if (atomic_read(&kgdb_break_tasklet_var) ||
90147+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
90148 atomic_read(&kgdb_active) != -1 ||
90149 atomic_read(&kgdb_setting_breakpoint))
90150 return;
90151- atomic_inc(&kgdb_break_tasklet_var);
90152+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
90153 tasklet_schedule(&kgdb_tasklet_breakpoint);
90154 }
90155 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
90156diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
90157index 2f7c760..95b6a66 100644
90158--- a/kernel/debug/kdb/kdb_main.c
90159+++ b/kernel/debug/kdb/kdb_main.c
90160@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
90161 continue;
90162
90163 kdb_printf("%-20s%8u 0x%p ", mod->name,
90164- mod->core_size, (void *)mod);
90165+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
90166 #ifdef CONFIG_MODULE_UNLOAD
90167 kdb_printf("%4ld ", module_refcount(mod));
90168 #endif
90169@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
90170 kdb_printf(" (Loading)");
90171 else
90172 kdb_printf(" (Live)");
90173- kdb_printf(" 0x%p", mod->module_core);
90174+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
90175
90176 #ifdef CONFIG_MODULE_UNLOAD
90177 {
90178diff --git a/kernel/events/core.c b/kernel/events/core.c
90179index 6b17ac1..00fd505 100644
90180--- a/kernel/events/core.c
90181+++ b/kernel/events/core.c
90182@@ -160,8 +160,15 @@ static struct srcu_struct pmus_srcu;
90183 * 0 - disallow raw tracepoint access for unpriv
90184 * 1 - disallow cpu events for unpriv
90185 * 2 - disallow kernel profiling for unpriv
90186+ * 3 - disallow all unpriv perf event use
90187 */
90188-int sysctl_perf_event_paranoid __read_mostly = 1;
90189+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90190+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
90191+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
90192+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
90193+#else
90194+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
90195+#endif
90196
90197 /* Minimum for 512 kiB + 1 user control page */
90198 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
90199@@ -187,7 +194,7 @@ void update_perf_cpu_limits(void)
90200
90201 tmp *= sysctl_perf_cpu_time_max_percent;
90202 do_div(tmp, 100);
90203- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
90204+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
90205 }
90206
90207 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
90208@@ -293,7 +300,7 @@ void perf_sample_event_took(u64 sample_len_ns)
90209 }
90210 }
90211
90212-static atomic64_t perf_event_id;
90213+static atomic64_unchecked_t perf_event_id;
90214
90215 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
90216 enum event_type_t event_type);
90217@@ -3023,7 +3030,7 @@ static void __perf_event_read(void *info)
90218
90219 static inline u64 perf_event_count(struct perf_event *event)
90220 {
90221- return local64_read(&event->count) + atomic64_read(&event->child_count);
90222+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
90223 }
90224
90225 static u64 perf_event_read(struct perf_event *event)
90226@@ -3399,9 +3406,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
90227 mutex_lock(&event->child_mutex);
90228 total += perf_event_read(event);
90229 *enabled += event->total_time_enabled +
90230- atomic64_read(&event->child_total_time_enabled);
90231+ atomic64_read_unchecked(&event->child_total_time_enabled);
90232 *running += event->total_time_running +
90233- atomic64_read(&event->child_total_time_running);
90234+ atomic64_read_unchecked(&event->child_total_time_running);
90235
90236 list_for_each_entry(child, &event->child_list, child_list) {
90237 total += perf_event_read(child);
90238@@ -3830,10 +3837,10 @@ void perf_event_update_userpage(struct perf_event *event)
90239 userpg->offset -= local64_read(&event->hw.prev_count);
90240
90241 userpg->time_enabled = enabled +
90242- atomic64_read(&event->child_total_time_enabled);
90243+ atomic64_read_unchecked(&event->child_total_time_enabled);
90244
90245 userpg->time_running = running +
90246- atomic64_read(&event->child_total_time_running);
90247+ atomic64_read_unchecked(&event->child_total_time_running);
90248
90249 arch_perf_update_userpage(userpg, now);
90250
90251@@ -4397,7 +4404,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
90252
90253 /* Data. */
90254 sp = perf_user_stack_pointer(regs);
90255- rem = __output_copy_user(handle, (void *) sp, dump_size);
90256+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
90257 dyn_size = dump_size - rem;
90258
90259 perf_output_skip(handle, rem);
90260@@ -4488,11 +4495,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
90261 values[n++] = perf_event_count(event);
90262 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
90263 values[n++] = enabled +
90264- atomic64_read(&event->child_total_time_enabled);
90265+ atomic64_read_unchecked(&event->child_total_time_enabled);
90266 }
90267 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
90268 values[n++] = running +
90269- atomic64_read(&event->child_total_time_running);
90270+ atomic64_read_unchecked(&event->child_total_time_running);
90271 }
90272 if (read_format & PERF_FORMAT_ID)
90273 values[n++] = primary_event_id(event);
90274@@ -6801,7 +6808,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
90275 event->parent = parent_event;
90276
90277 event->ns = get_pid_ns(task_active_pid_ns(current));
90278- event->id = atomic64_inc_return(&perf_event_id);
90279+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
90280
90281 event->state = PERF_EVENT_STATE_INACTIVE;
90282
90283@@ -7080,6 +7087,11 @@ SYSCALL_DEFINE5(perf_event_open,
90284 if (flags & ~PERF_FLAG_ALL)
90285 return -EINVAL;
90286
90287+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90288+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
90289+ return -EACCES;
90290+#endif
90291+
90292 err = perf_copy_attr(attr_uptr, &attr);
90293 if (err)
90294 return err;
90295@@ -7432,10 +7444,10 @@ static void sync_child_event(struct perf_event *child_event,
90296 /*
90297 * Add back the child's count to the parent's count:
90298 */
90299- atomic64_add(child_val, &parent_event->child_count);
90300- atomic64_add(child_event->total_time_enabled,
90301+ atomic64_add_unchecked(child_val, &parent_event->child_count);
90302+ atomic64_add_unchecked(child_event->total_time_enabled,
90303 &parent_event->child_total_time_enabled);
90304- atomic64_add(child_event->total_time_running,
90305+ atomic64_add_unchecked(child_event->total_time_running,
90306 &parent_event->child_total_time_running);
90307
90308 /*
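
The perf hunks above swap statistical counters (child_count, child_total_time_enabled/running) from atomic64_t to the atomic64_*_unchecked variants. Under the PaX REFCOUNT hardening this patch introduces elsewhere, plain atomic ops trap on signed overflow to catch reference-count wraps; counters that are allowed to wrap must therefore use the unchecked family, which keeps pre-hardening semantics. A minimal sketch of the generic fallback shape, assuming an architecture without its own override (the names follow the patch, the bodies are illustrative, not quoted from it):

typedef struct {
	long long counter;
} atomic64_unchecked_t;

static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
{
	/* identical to atomic64_read(), minus the overflow instrumentation */
	return atomic64_read((const atomic64_t *)v);
}

static inline void atomic64_add_unchecked(long long a, atomic64_unchecked_t *v)
{
	atomic64_add(a, (atomic64_t *)v);
}
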
90309diff --git a/kernel/events/internal.h b/kernel/events/internal.h
90310index 569b2187..19940d9 100644
90311--- a/kernel/events/internal.h
90312+++ b/kernel/events/internal.h
90313@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
90314 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
90315 }
90316
90317-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
90318+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
90319 static inline unsigned long \
90320 func_name(struct perf_output_handle *handle, \
90321- const void *buf, unsigned long len) \
90322+ const void user *buf, unsigned long len) \
90323 { \
90324 unsigned long size, written; \
90325 \
90326@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
90327 return 0;
90328 }
90329
90330-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
90331+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
90332
90333 static inline unsigned long
90334 memcpy_skip(void *dst, const void *src, unsigned long n)
90335@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
90336 return 0;
90337 }
90338
90339-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
90340+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
90341
90342 #ifndef arch_perf_out_copy_user
90343 #define arch_perf_out_copy_user arch_perf_out_copy_user
90344@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
90345 }
90346 #endif
90347
90348-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
90349+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
90350
90351 /* Callchain handling */
90352 extern struct perf_callchain_entry *
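
The extra DEFINE_OUTPUT_COPY parameter threads an (optionally empty) address-space qualifier into the generated signature, so sparse (make C=1) can type-check the user-copy variant while the kernel-to-kernel copies keep plain pointers. Expanded by hand, the three instantiations above declare roughly this (signatures only; the bodies come unchanged from the macro):

static inline unsigned long
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned long len);

static inline unsigned long
__output_skip(struct perf_output_handle *handle,
	      const void *buf, unsigned long len);

static inline unsigned long
__output_copy_user(struct perf_output_handle *handle,
		   const void __user *buf, unsigned long len);

This is also why the perf_output_sample_ustack() hunk above adds the explicit (void __user *) cast: without it, the now-annotated __output_copy_user() would draw a sparse address-space warning.
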
90353diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
90354index 6f3254e..e4c1fe4 100644
90355--- a/kernel/events/uprobes.c
90356+++ b/kernel/events/uprobes.c
90357@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
90358 {
90359 struct page *page;
90360 uprobe_opcode_t opcode;
90361- int result;
90362+ long result;
90363
90364 pagefault_disable();
90365 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
90366diff --git a/kernel/exit.c b/kernel/exit.c
90367index e5c4668..592d2e5 100644
90368--- a/kernel/exit.c
90369+++ b/kernel/exit.c
90370@@ -173,6 +173,10 @@ void release_task(struct task_struct * p)
90371 struct task_struct *leader;
90372 int zap_leader;
90373 repeat:
90374+#ifdef CONFIG_NET
90375+ gr_del_task_from_ip_table(p);
90376+#endif
90377+
90378 /* don't need to get the RCU readlock here - the process is dead and
90379 * can't be modifying its own credentials. But shut RCU-lockdep up */
90380 rcu_read_lock();
90381@@ -664,6 +668,8 @@ void do_exit(long code)
90382 struct task_struct *tsk = current;
90383 int group_dead;
90384
90385+ set_fs(USER_DS);
90386+
90387 profile_task_exit(tsk);
90388
90389 WARN_ON(blk_needs_flush_plug(tsk));
90390@@ -680,7 +686,6 @@ void do_exit(long code)
90391 * mm_release()->clear_child_tid() from writing to a user-controlled
90392 * kernel address.
90393 */
90394- set_fs(USER_DS);
90395
90396 ptrace_event(PTRACE_EVENT_EXIT, code);
90397
90398@@ -739,6 +744,9 @@ void do_exit(long code)
90399 tsk->exit_code = code;
90400 taskstats_exit(tsk, group_dead);
90401
90402+ gr_acl_handle_psacct(tsk, code);
90403+ gr_acl_handle_exit();
90404+
90405 exit_mm(tsk);
90406
90407 if (group_dead)
90408@@ -858,7 +866,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
90409 * Take down every thread in the group. This is called by fatal signals
90410 * as well as by sys_exit_group (below).
90411 */
90412-void
90413+__noreturn void
90414 do_group_exit(int exit_code)
90415 {
90416 struct signal_struct *sig = current->signal;
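
Two independent fixes in the exit.c hunks: set_fs(USER_DS) is hoisted to the very top of do_exit(), so a task that entered exit with a stale KERNEL_DS limit cannot reach the PTRACE_EVENT_EXIT stop with kernel-range user accesses still permitted, and do_group_exit() is annotated __noreturn. The annotation is purely a compiler contract; a small illustration of what it buys (hypothetical caller):

__noreturn void do_group_exit(int exit_code);

static void fatal(void)
{
	do_group_exit(SIGKILL);
	/* the compiler now knows this point is unreachable: callers get
	 * no "control reaches end of non-void function" warnings, and
	 * dead code after the call can be discarded */
}
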
90417diff --git a/kernel/fork.c b/kernel/fork.c
90418index 6a13c46..a623c8e 100644
90419--- a/kernel/fork.c
90420+++ b/kernel/fork.c
90421@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
90422 # endif
90423 #endif
90424
90425+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90426+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
90427+ int node, void **lowmem_stack)
90428+{
90429+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
90430+ void *ret = NULL;
90431+ unsigned int i;
90432+
90433+ *lowmem_stack = alloc_thread_info_node(tsk, node);
90434+ if (*lowmem_stack == NULL)
90435+ goto out;
90436+
90437+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
90438+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
90439+
90440+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
90441+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
90442+ if (ret == NULL) {
90443+ free_thread_info(*lowmem_stack);
90444+ *lowmem_stack = NULL;
90445+ }
90446+
90447+out:
90448+ return ret;
90449+}
90450+
90451+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
90452+{
90453+ unmap_process_stacks(tsk);
90454+}
90455+#else
90456+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
90457+ int node, void **lowmem_stack)
90458+{
90459+ return alloc_thread_info_node(tsk, node);
90460+}
90461+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
90462+{
90463+ free_thread_info(ti);
90464+}
90465+#endif
90466+
90467 /* SLAB cache for signal_struct structures (tsk->signal) */
90468 static struct kmem_cache *signal_cachep;
90469
90470@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
90471 /* SLAB cache for mm_struct structures (tsk->mm) */
90472 static struct kmem_cache *mm_cachep;
90473
90474-static void account_kernel_stack(struct thread_info *ti, int account)
90475+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
90476 {
90477+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90478+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
90479+#else
90480 struct zone *zone = page_zone(virt_to_page(ti));
90481+#endif
90482
90483 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
90484 }
90485
90486 void free_task(struct task_struct *tsk)
90487 {
90488- account_kernel_stack(tsk->stack, -1);
90489+ account_kernel_stack(tsk, tsk->stack, -1);
90490 arch_release_thread_info(tsk->stack);
90491- free_thread_info(tsk->stack);
90492+ gr_free_thread_info(tsk, tsk->stack);
90493 rt_mutex_debug_task_free(tsk);
90494 ftrace_graph_exit_task(tsk);
90495 put_seccomp_filter(tsk);
90496@@ -299,6 +345,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90497 struct task_struct *tsk;
90498 struct thread_info *ti;
90499 unsigned long *stackend;
90500+ void *lowmem_stack;
90501 int node = tsk_fork_get_node(orig);
90502 int err;
90503
90504@@ -306,7 +353,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90505 if (!tsk)
90506 return NULL;
90507
90508- ti = alloc_thread_info_node(tsk, node);
90509+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
90510 if (!ti)
90511 goto free_tsk;
90512
90513@@ -315,6 +362,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90514 goto free_ti;
90515
90516 tsk->stack = ti;
90517+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90518+ tsk->lowmem_stack = lowmem_stack;
90519+#endif
90520
90521 setup_thread_stack(tsk, orig);
90522 clear_user_return_notifier(tsk);
90523@@ -323,7 +373,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90524 *stackend = STACK_END_MAGIC; /* for overflow detection */
90525
90526 #ifdef CONFIG_CC_STACKPROTECTOR
90527- tsk->stack_canary = get_random_int();
90528+ tsk->stack_canary = pax_get_random_long();
90529 #endif
90530
90531 /*
90532@@ -337,24 +387,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90533 tsk->splice_pipe = NULL;
90534 tsk->task_frag.page = NULL;
90535
90536- account_kernel_stack(ti, 1);
90537+ account_kernel_stack(tsk, ti, 1);
90538
90539 return tsk;
90540
90541 free_ti:
90542- free_thread_info(ti);
90543+ gr_free_thread_info(tsk, ti);
90544 free_tsk:
90545 free_task_struct(tsk);
90546 return NULL;
90547 }
90548
90549 #ifdef CONFIG_MMU
90550-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90551+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
90552+{
90553+ struct vm_area_struct *tmp;
90554+ unsigned long charge;
90555+ struct file *file;
90556+ int retval;
90557+
90558+ charge = 0;
90559+ if (mpnt->vm_flags & VM_ACCOUNT) {
90560+ unsigned long len = vma_pages(mpnt);
90561+
90562+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90563+ goto fail_nomem;
90564+ charge = len;
90565+ }
90566+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90567+ if (!tmp)
90568+ goto fail_nomem;
90569+ *tmp = *mpnt;
90570+ tmp->vm_mm = mm;
90571+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
90572+ retval = vma_dup_policy(mpnt, tmp);
90573+ if (retval)
90574+ goto fail_nomem_policy;
90575+ if (anon_vma_fork(tmp, mpnt))
90576+ goto fail_nomem_anon_vma_fork;
90577+ tmp->vm_flags &= ~VM_LOCKED;
90578+ tmp->vm_next = tmp->vm_prev = NULL;
90579+ tmp->vm_mirror = NULL;
90580+ file = tmp->vm_file;
90581+ if (file) {
90582+ struct inode *inode = file_inode(file);
90583+ struct address_space *mapping = file->f_mapping;
90584+
90585+ get_file(file);
90586+ if (tmp->vm_flags & VM_DENYWRITE)
90587+ atomic_dec(&inode->i_writecount);
90588+ mutex_lock(&mapping->i_mmap_mutex);
90589+ if (tmp->vm_flags & VM_SHARED)
90590+ mapping->i_mmap_writable++;
90591+ flush_dcache_mmap_lock(mapping);
90592+ /* insert tmp into the share list, just after mpnt */
90593+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
90594+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
90595+ else
90596+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
90597+ flush_dcache_mmap_unlock(mapping);
90598+ mutex_unlock(&mapping->i_mmap_mutex);
90599+ }
90600+
90601+ /*
90602+ * Clear hugetlb-related page reserves for children. This only
90603+ * affects MAP_PRIVATE mappings. Faults generated by the child
90604+ * are not guaranteed to succeed, even if read-only
90605+ */
90606+ if (is_vm_hugetlb_page(tmp))
90607+ reset_vma_resv_huge_pages(tmp);
90608+
90609+ return tmp;
90610+
90611+fail_nomem_anon_vma_fork:
90612+ mpol_put(vma_policy(tmp));
90613+fail_nomem_policy:
90614+ kmem_cache_free(vm_area_cachep, tmp);
90615+fail_nomem:
90616+ vm_unacct_memory(charge);
90617+ return NULL;
90618+}
90619+
90620+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90621 {
90622 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
90623 struct rb_node **rb_link, *rb_parent;
90624 int retval;
90625- unsigned long charge;
90626
90627 uprobe_start_dup_mmap();
90628 down_write(&oldmm->mmap_sem);
90629@@ -383,55 +501,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90630
90631 prev = NULL;
90632 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
90633- struct file *file;
90634-
90635 if (mpnt->vm_flags & VM_DONTCOPY) {
90636 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
90637 -vma_pages(mpnt));
90638 continue;
90639 }
90640- charge = 0;
90641- if (mpnt->vm_flags & VM_ACCOUNT) {
90642- unsigned long len = vma_pages(mpnt);
90643-
90644- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90645- goto fail_nomem;
90646- charge = len;
90647- }
90648- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90649- if (!tmp)
90650- goto fail_nomem;
90651- *tmp = *mpnt;
90652- INIT_LIST_HEAD(&tmp->anon_vma_chain);
90653- retval = vma_dup_policy(mpnt, tmp);
90654- if (retval)
90655- goto fail_nomem_policy;
90656- tmp->vm_mm = mm;
90657- if (anon_vma_fork(tmp, mpnt))
90658- goto fail_nomem_anon_vma_fork;
90659- tmp->vm_flags &= ~VM_LOCKED;
90660- tmp->vm_next = tmp->vm_prev = NULL;
90661- file = tmp->vm_file;
90662- if (file) {
90663- struct inode *inode = file_inode(file);
90664- struct address_space *mapping = file->f_mapping;
90665-
90666- get_file(file);
90667- if (tmp->vm_flags & VM_DENYWRITE)
90668- atomic_dec(&inode->i_writecount);
90669- mutex_lock(&mapping->i_mmap_mutex);
90670- if (tmp->vm_flags & VM_SHARED)
90671- mapping->i_mmap_writable++;
90672- flush_dcache_mmap_lock(mapping);
90673- /* insert tmp into the share list, just after mpnt */
90674- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
90675- vma_nonlinear_insert(tmp,
90676- &mapping->i_mmap_nonlinear);
90677- else
90678- vma_interval_tree_insert_after(tmp, mpnt,
90679- &mapping->i_mmap);
90680- flush_dcache_mmap_unlock(mapping);
90681- mutex_unlock(&mapping->i_mmap_mutex);
90682+ tmp = dup_vma(mm, oldmm, mpnt);
90683+ if (!tmp) {
90684+ retval = -ENOMEM;
90685+ goto out;
90686 }
90687
90688 /*
90689@@ -463,6 +541,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90690 if (retval)
90691 goto out;
90692 }
90693+
90694+#ifdef CONFIG_PAX_SEGMEXEC
90695+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
90696+ struct vm_area_struct *mpnt_m;
90697+
90698+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
90699+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
90700+
90701+ if (!mpnt->vm_mirror)
90702+ continue;
90703+
90704+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
90705+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
90706+ mpnt->vm_mirror = mpnt_m;
90707+ } else {
90708+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
90709+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
90710+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
90711+ mpnt->vm_mirror->vm_mirror = mpnt;
90712+ }
90713+ }
90714+ BUG_ON(mpnt_m);
90715+ }
90716+#endif
90717+
90718 /* a new mm has just been created */
90719 arch_dup_mmap(oldmm, mm);
90720 retval = 0;
90721@@ -472,14 +575,6 @@ out:
90722 up_write(&oldmm->mmap_sem);
90723 uprobe_end_dup_mmap();
90724 return retval;
90725-fail_nomem_anon_vma_fork:
90726- mpol_put(vma_policy(tmp));
90727-fail_nomem_policy:
90728- kmem_cache_free(vm_area_cachep, tmp);
90729-fail_nomem:
90730- retval = -ENOMEM;
90731- vm_unacct_memory(charge);
90732- goto out;
90733 }
90734
90735 static inline int mm_alloc_pgd(struct mm_struct *mm)
90736@@ -698,8 +793,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
90737 return ERR_PTR(err);
90738
90739 mm = get_task_mm(task);
90740- if (mm && mm != current->mm &&
90741- !ptrace_may_access(task, mode)) {
90742+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
90743+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
90744 mmput(mm);
90745 mm = ERR_PTR(-EACCES);
90746 }
90747@@ -918,13 +1013,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
90748 spin_unlock(&fs->lock);
90749 return -EAGAIN;
90750 }
90751- fs->users++;
90752+ atomic_inc(&fs->users);
90753 spin_unlock(&fs->lock);
90754 return 0;
90755 }
90756 tsk->fs = copy_fs_struct(fs);
90757 if (!tsk->fs)
90758 return -ENOMEM;
90759+ /* Carry through gr_chroot_dentry and is_chrooted instead
90760+ of recomputing it here. Already copied when the task struct
90761+ is duplicated. This allows pivot_root to not be treated as
90762+ a chroot
90763+ */
90764+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
90765+
90766 return 0;
90767 }
90768
90769@@ -1133,7 +1235,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
90770 * parts of the process environment (as per the clone
90771 * flags). The actual kick-off is left to the caller.
90772 */
90773-static struct task_struct *copy_process(unsigned long clone_flags,
90774+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
90775 unsigned long stack_start,
90776 unsigned long stack_size,
90777 int __user *child_tidptr,
90778@@ -1205,6 +1307,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
90779 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
90780 #endif
90781 retval = -EAGAIN;
90782+
90783+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
90784+
90785 if (atomic_read(&p->real_cred->user->processes) >=
90786 task_rlimit(p, RLIMIT_NPROC)) {
90787 if (p->real_cred->user != INIT_USER &&
90788@@ -1452,6 +1557,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
90789 goto bad_fork_free_pid;
90790 }
90791
90792+ /* synchronizes with gr_set_acls()
90793+ we need to call this past the point of no return for fork()
90794+ */
90795+ gr_copy_label(p);
90796+
90797 if (likely(p->pid)) {
90798 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
90799
90800@@ -1541,6 +1651,8 @@ bad_fork_cleanup_count:
90801 bad_fork_free:
90802 free_task(p);
90803 fork_out:
90804+ gr_log_forkfail(retval);
90805+
90806 return ERR_PTR(retval);
90807 }
90808
90809@@ -1602,6 +1714,7 @@ long do_fork(unsigned long clone_flags,
90810
90811 p = copy_process(clone_flags, stack_start, stack_size,
90812 child_tidptr, NULL, trace);
90813+ add_latent_entropy();
90814 /*
90815 * Do this prior waking up the new thread - the thread pointer
90816 * might get invalid after that point, if the thread exits quickly.
90817@@ -1618,6 +1731,8 @@ long do_fork(unsigned long clone_flags,
90818 if (clone_flags & CLONE_PARENT_SETTID)
90819 put_user(nr, parent_tidptr);
90820
90821+ gr_handle_brute_check();
90822+
90823 if (clone_flags & CLONE_VFORK) {
90824 p->vfork_done = &vfork;
90825 init_completion(&vfork);
90826@@ -1736,7 +1851,7 @@ void __init proc_caches_init(void)
90827 mm_cachep = kmem_cache_create("mm_struct",
90828 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
90829 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
90830- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
90831+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
90832 mmap_init();
90833 nsproxy_cache_init();
90834 }
90835@@ -1776,7 +1891,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
90836 return 0;
90837
90838 /* don't need lock here; in the worst case we'll do useless copy */
90839- if (fs->users == 1)
90840+ if (atomic_read(&fs->users) == 1)
90841 return 0;
90842
90843 *new_fsp = copy_fs_struct(fs);
90844@@ -1883,7 +1998,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
90845 fs = current->fs;
90846 spin_lock(&fs->lock);
90847 current->fs = new_fs;
90848- if (--fs->users)
90849+ gr_set_chroot_entries(current, &current->fs->root);
90850+ if (atomic_dec_return(&fs->users))
90851 new_fs = NULL;
90852 else
90853 new_fs = fs;
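
The GRKERNSEC_KSTACKOVERFLOW allocator above keeps the physically contiguous lowmem stack (preserved in tsk->lowmem_stack for anything that needs virt_to_page()), but points the task at a vmap() alias of the same pages. Vmalloc space surrounds each mapping with unmapped guard pages, so running off the end of the stack faults instead of silently corrupting a neighbouring allocation; VM_IOREMAP is borrowed only to get THREAD_SIZE alignment out of the vmalloc allocator, as the comment in the hunk notes. A userspace analogue of the guard-page idea (hypothetical demo, not kernel code):

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

/* Hypothetical demo: a "stack" with PROT_NONE guard pages on both
 * ends, so an overrun faults immediately instead of corrupting an
 * adjacent allocation. */
static void *alloc_guarded(size_t size)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	char *base = mmap(NULL, size + 2 * page, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (base == MAP_FAILED)
		return NULL;
	if (mprotect(base + page, size, PROT_READ | PROT_WRITE)) {
		munmap(base, size + 2 * page);
		return NULL;
	}
	return base + page;
}

int main(void)
{
	char *stk = alloc_guarded(16384);

	if (!stk)
		return 1;
	stk[0] = 1;            /* inside the usable region: fine */
	/* stk[16384] = 1;        would SIGSEGV on the guard page */
	printf("guarded region at %p\n", (void *)stk);
	return 0;
}
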
90854diff --git a/kernel/futex.c b/kernel/futex.c
90855index b632b5f..ca00da9 100644
90856--- a/kernel/futex.c
90857+++ b/kernel/futex.c
90858@@ -202,7 +202,7 @@ struct futex_pi_state {
90859 atomic_t refcount;
90860
90861 union futex_key key;
90862-};
90863+} __randomize_layout;
90864
90865 /**
90866 * struct futex_q - The hashed futex queue entry, one per waiting task
90867@@ -236,7 +236,7 @@ struct futex_q {
90868 struct rt_mutex_waiter *rt_waiter;
90869 union futex_key *requeue_pi_key;
90870 u32 bitset;
90871-};
90872+} __randomize_layout;
90873
90874 static const struct futex_q futex_q_init = {
90875 /* list gets initialized in queue_me()*/
90876@@ -394,6 +394,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
90877 struct page *page, *page_head;
90878 int err, ro = 0;
90879
90880+#ifdef CONFIG_PAX_SEGMEXEC
90881+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
90882+ return -EFAULT;
90883+#endif
90884+
90885 /*
90886 * The futex address must be "naturally" aligned.
90887 */
90888@@ -593,7 +598,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
90889
90890 static int get_futex_value_locked(u32 *dest, u32 __user *from)
90891 {
90892- int ret;
90893+ unsigned long ret;
90894
90895 pagefault_disable();
90896 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
90897@@ -3033,6 +3038,7 @@ static void __init futex_detect_cmpxchg(void)
90898 {
90899 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
90900 u32 curval;
90901+ mm_segment_t oldfs;
90902
90903 /*
90904 * This will fail and we want it. Some arch implementations do
90905@@ -3044,8 +3050,11 @@ static void __init futex_detect_cmpxchg(void)
90906 * implementation, the non-functional ones will return
90907 * -ENOSYS.
90908 */
90909+ oldfs = get_fs();
90910+ set_fs(USER_DS);
90911 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
90912 futex_cmpxchg_enabled = 1;
90913+ set_fs(oldfs);
90914 #endif
90915 }
90916
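
The futex_detect_cmpxchg() change brackets the deliberate NULL-uaddr probe with an explicit USER_DS limit: with this patch's stricter user/kernel separation, the boot-time address limit is not guaranteed to make a NULL user access fail cleanly with -EFAULT, so the probe pins the limit it expects and restores the old one afterwards. The save/restore idiom on its own (pattern sketch, kernel context assumed):

mm_segment_t oldfs = get_fs();

set_fs(USER_DS);
/* ...an access that must be range-checked as a userland pointer... */
set_fs(oldfs);
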
90917diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
90918index 55c8c93..9ba7ad6 100644
90919--- a/kernel/futex_compat.c
90920+++ b/kernel/futex_compat.c
90921@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
90922 return 0;
90923 }
90924
90925-static void __user *futex_uaddr(struct robust_list __user *entry,
90926+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
90927 compat_long_t futex_offset)
90928 {
90929 compat_uptr_t base = ptr_to_compat(entry);
90930diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
90931index b358a80..fc25240 100644
90932--- a/kernel/gcov/base.c
90933+++ b/kernel/gcov/base.c
90934@@ -114,11 +114,6 @@ void gcov_enable_events(void)
90935 }
90936
90937 #ifdef CONFIG_MODULES
90938-static inline int within(void *addr, void *start, unsigned long size)
90939-{
90940- return ((addr >= start) && (addr < start + size));
90941-}
90942-
90943 /* Update list and generate events when modules are unloaded. */
90944 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90945 void *data)
90946@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90947
90948 /* Remove entries located in module from linked list. */
90949 while ((info = gcov_info_next(info))) {
90950- if (within(info, mod->module_core, mod->core_size)) {
90951+ if (within_module_core_rw((unsigned long)info, mod)) {
90952 gcov_info_unlink(prev, info);
90953 if (gcov_events_enabled)
90954 gcov_event(GCOV_REMOVE, info);
90955diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
90956index 3ab2899..c6ad010 100644
90957--- a/kernel/hrtimer.c
90958+++ b/kernel/hrtimer.c
90959@@ -1449,7 +1449,7 @@ void hrtimer_peek_ahead_timers(void)
90960 local_irq_restore(flags);
90961 }
90962
90963-static void run_hrtimer_softirq(struct softirq_action *h)
90964+static __latent_entropy void run_hrtimer_softirq(void)
90965 {
90966 hrtimer_peek_ahead_timers();
90967 }
90968diff --git a/kernel/irq_work.c b/kernel/irq_work.c
90969index a82170e..5b01e7f 100644
90970--- a/kernel/irq_work.c
90971+++ b/kernel/irq_work.c
90972@@ -191,12 +191,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
90973 return NOTIFY_OK;
90974 }
90975
90976-static struct notifier_block cpu_notify;
90977+static struct notifier_block cpu_notify = {
90978+ .notifier_call = irq_work_cpu_notify,
90979+ .priority = 0,
90980+};
90981
90982 static __init int irq_work_init_cpu_notifier(void)
90983 {
90984- cpu_notify.notifier_call = irq_work_cpu_notify;
90985- cpu_notify.priority = 0;
90986 register_cpu_notifier(&cpu_notify);
90987 return 0;
90988 }
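
Moving the notifier_block setup from runtime assignments into a designated initializer is a constification enabler: an object whose fields are entirely known at build time can be considered for read-only or write-once placement by the hardening machinery elsewhere in this patch, whereas the old code forced the structure to stay plainly writable just to fill in two constants. The same transformation in miniature (hypothetical struct, for illustration):

struct cb {
	int (*fn)(int);
	int prio;
};

static int my_fn(int v) { return v; }

/* initializer fully known at build time: eligible for const/RO placement */
static const struct cb my_cb = {
	.fn   = my_fn,
	.prio = 0,
};
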
90989diff --git a/kernel/jump_label.c b/kernel/jump_label.c
90990index 9019f15..9a3c42e 100644
90991--- a/kernel/jump_label.c
90992+++ b/kernel/jump_label.c
90993@@ -14,6 +14,7 @@
90994 #include <linux/err.h>
90995 #include <linux/static_key.h>
90996 #include <linux/jump_label_ratelimit.h>
90997+#include <linux/mm.h>
90998
90999 #ifdef HAVE_JUMP_LABEL
91000
91001@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
91002
91003 size = (((unsigned long)stop - (unsigned long)start)
91004 / sizeof(struct jump_entry));
91005+ pax_open_kernel();
91006 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
91007+ pax_close_kernel();
91008 }
91009
91010 static void jump_label_update(struct static_key *key, int enable);
91011@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
91012 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
91013 struct jump_entry *iter;
91014
91015+ pax_open_kernel();
91016 for (iter = iter_start; iter < iter_stop; iter++) {
91017 if (within_module_init(iter->code, mod))
91018 iter->code = 0;
91019 }
91020+ pax_close_kernel();
91021 }
91022
91023 static int
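
Both jump_label hunks write to tables that KERNEXEC places in read-only memory, hence the pax_open_kernel()/pax_close_kernel() bracket around the in-place sort and the init-entry invalidation. On x86 the pair is conventionally implemented by toggling CR0.WP; a sketch of that flavour follows, with the caveat that the real per-arch definitions in this patch differ in detail (preemption handling, paravirt hooks), so treat the bodies below as assumptions:

static inline unsigned long pax_open_kernel(void)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* permit writes to RO pages */
	barrier();
	return cr0;
}

static inline unsigned long pax_close_kernel(void)
{
	unsigned long cr0 = read_cr0() | X86_CR0_WP;

	barrier();
	write_cr0(cr0);			/* restore write protection */
	return cr0;
}
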
91024diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91025index cb0cf37..b69e161 100644
91026--- a/kernel/kallsyms.c
91027+++ b/kernel/kallsyms.c
91028@@ -11,6 +11,9 @@
91029 * Changed the compression method from stem compression to "table lookup"
91030 * compression (see scripts/kallsyms.c for a more complete description)
91031 */
91032+#ifdef CONFIG_GRKERNSEC_HIDESYM
91033+#define __INCLUDED_BY_HIDESYM 1
91034+#endif
91035 #include <linux/kallsyms.h>
91036 #include <linux/module.h>
91037 #include <linux/init.h>
91038@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
91039
91040 static inline int is_kernel_inittext(unsigned long addr)
91041 {
91042+ if (system_state != SYSTEM_BOOTING)
91043+ return 0;
91044+
91045 if (addr >= (unsigned long)_sinittext
91046 && addr <= (unsigned long)_einittext)
91047 return 1;
91048 return 0;
91049 }
91050
91051+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91052+#ifdef CONFIG_MODULES
91053+static inline int is_module_text(unsigned long addr)
91054+{
91055+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91056+ return 1;
91057+
91058+ addr = ktla_ktva(addr);
91059+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91060+}
91061+#else
91062+static inline int is_module_text(unsigned long addr)
91063+{
91064+ return 0;
91065+}
91066+#endif
91067+#endif
91068+
91069 static inline int is_kernel_text(unsigned long addr)
91070 {
91071 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91072@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
91073
91074 static inline int is_kernel(unsigned long addr)
91075 {
91076+
91077+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91078+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91079+ return 1;
91080+
91081+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91082+#else
91083 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91084+#endif
91085+
91086 return 1;
91087 return in_gate_area_no_mm(addr);
91088 }
91089
91090 static int is_ksym_addr(unsigned long addr)
91091 {
91092+
91093+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91094+ if (is_module_text(addr))
91095+ return 0;
91096+#endif
91097+
91098 if (all_var)
91099 return is_kernel(addr);
91100
91101@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91102
91103 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91104 {
91105- iter->name[0] = '\0';
91106 iter->nameoff = get_symbol_offset(new_pos);
91107 iter->pos = new_pos;
91108 }
91109@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
91110 {
91111 struct kallsym_iter *iter = m->private;
91112
91113+#ifdef CONFIG_GRKERNSEC_HIDESYM
91114+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
91115+ return 0;
91116+#endif
91117+
91118 /* Some debugging symbols have no name. Ignore them. */
91119 if (!iter->name[0])
91120 return 0;
91121@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
91122 */
91123 type = iter->exported ? toupper(iter->type) :
91124 tolower(iter->type);
91125+
91126 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
91127 type, iter->name, iter->module_name);
91128 } else
91129@@ -567,7 +611,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
91130 struct kallsym_iter *iter;
91131 int ret;
91132
91133- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
91134+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
91135 if (!iter)
91136 return -ENOMEM;
91137 reset_iter(iter, 0);
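
Two separate hardenings in the kallsyms hunks: s_show() returns nothing to non-root readers under HIDESYM, and the iterator switches from kmalloc() to kzalloc() because reset_iter() no longer clears name[0], so an uninitialized buffer could otherwise leak stale slab bytes through the seq_file before the first symbol is formatted. The general infoleak shape the kzalloc conversion closes (hypothetical structure, illustrative only):

struct iter_buf {
	char name[128];		/* consulted before any writer has run */
	unsigned long value;
};

static struct iter_buf *iter_buf_alloc(void)
{
	/* kmalloc() here would hand back old slab contents; a reader
	 * that treats name[0] == '\0' as "empty" may then copy stale
	 * kernel memory out. kzalloc() removes the stale bytes. */
	return kzalloc(sizeof(struct iter_buf), GFP_KERNEL);
}
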
91138diff --git a/kernel/kcmp.c b/kernel/kcmp.c
91139index e30ac0f..3528cac 100644
91140--- a/kernel/kcmp.c
91141+++ b/kernel/kcmp.c
91142@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
91143 struct task_struct *task1, *task2;
91144 int ret;
91145
91146+#ifdef CONFIG_GRKERNSEC
91147+ return -ENOSYS;
91148+#endif
91149+
91150 rcu_read_lock();
91151
91152 /*
91153diff --git a/kernel/kexec.c b/kernel/kexec.c
91154index 4b8f0c9..fffd0df 100644
91155--- a/kernel/kexec.c
91156+++ b/kernel/kexec.c
91157@@ -1045,7 +1045,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
91158 compat_ulong_t, flags)
91159 {
91160 struct compat_kexec_segment in;
91161- struct kexec_segment out, __user *ksegments;
91162+ struct kexec_segment out;
91163+ struct kexec_segment __user *ksegments;
91164 unsigned long i, result;
91165
91166 /* Don't allow clients that don't understand the native
91167diff --git a/kernel/kmod.c b/kernel/kmod.c
91168index 8637e04..8b1d0d8 100644
91169--- a/kernel/kmod.c
91170+++ b/kernel/kmod.c
91171@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
91172 kfree(info->argv);
91173 }
91174
91175-static int call_modprobe(char *module_name, int wait)
91176+static int call_modprobe(char *module_name, char *module_param, int wait)
91177 {
91178 struct subprocess_info *info;
91179 static char *envp[] = {
91180@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
91181 NULL
91182 };
91183
91184- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
91185+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
91186 if (!argv)
91187 goto out;
91188
91189@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
91190 argv[1] = "-q";
91191 argv[2] = "--";
91192 argv[3] = module_name; /* check free_modprobe_argv() */
91193- argv[4] = NULL;
91194+ argv[4] = module_param;
91195+ argv[5] = NULL;
91196
91197 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
91198 NULL, free_modprobe_argv, NULL);
91199@@ -129,9 +130,8 @@ out:
91200 * If module auto-loading support is disabled then this function
91201 * becomes a no-operation.
91202 */
91203-int __request_module(bool wait, const char *fmt, ...)
91204+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91205 {
91206- va_list args;
91207 char module_name[MODULE_NAME_LEN];
91208 unsigned int max_modprobes;
91209 int ret;
91210@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
91211 if (!modprobe_path[0])
91212 return 0;
91213
91214- va_start(args, fmt);
91215- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91216- va_end(args);
91217+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91218 if (ret >= MODULE_NAME_LEN)
91219 return -ENAMETOOLONG;
91220
91221@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
91222 if (ret)
91223 return ret;
91224
91225+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91226+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91227+	/* hack to work around consolekit/udisks stupidity */
91228+ read_lock(&tasklist_lock);
91229+ if (!strcmp(current->comm, "mount") &&
91230+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91231+ read_unlock(&tasklist_lock);
91232+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91233+ return -EPERM;
91234+ }
91235+ read_unlock(&tasklist_lock);
91236+ }
91237+#endif
91238+
91239 /* If modprobe needs a service that is in a module, we get a recursive
91240 * loop. Limit the number of running kmod threads to max_threads/2 or
91241 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91242@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
91243
91244 trace_module_request(module_name, wait, _RET_IP_);
91245
91246- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91247+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91248
91249 atomic_dec(&kmod_concurrent);
91250 return ret;
91251 }
91252+
91253+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91254+{
91255+ va_list args;
91256+ int ret;
91257+
91258+ va_start(args, fmt);
91259+ ret = ____request_module(wait, module_param, fmt, args);
91260+ va_end(args);
91261+
91262+ return ret;
91263+}
91264+
91265+int __request_module(bool wait, const char *fmt, ...)
91266+{
91267+ va_list args;
91268+ int ret;
91269+
91270+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91271+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91272+ char module_param[MODULE_NAME_LEN];
91273+
91274+ memset(module_param, 0, sizeof(module_param));
91275+
91276+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
91277+
91278+ va_start(args, fmt);
91279+ ret = ____request_module(wait, module_param, fmt, args);
91280+ va_end(args);
91281+
91282+ return ret;
91283+ }
91284+#endif
91285+
91286+ va_start(args, fmt);
91287+ ret = ____request_module(wait, NULL, fmt, args);
91288+ va_end(args);
91289+
91290+ return ret;
91291+}
91292+
91293 EXPORT_SYMBOL(__request_module);
91294 #endif /* CONFIG_MODULES */
91295
91296@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
91297 */
91298 set_user_nice(current, 0);
91299
91300+#ifdef CONFIG_GRKERNSEC
91301+ /* this is race-free as far as userland is concerned as we copied
91302+ out the path to be used prior to this point and are now operating
91303+ on that copy
91304+ */
91305+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
91306+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
91307+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
91308+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
91309+ retval = -EPERM;
91310+ goto fail;
91311+ }
91312+#endif
91313+
91314 retval = -ENOMEM;
91315 new = prepare_kernel_cred(current);
91316 if (!new)
91317@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
91318 commit_creds(new);
91319
91320 retval = do_execve(getname_kernel(sub_info->path),
91321- (const char __user *const __user *)sub_info->argv,
91322- (const char __user *const __user *)sub_info->envp);
91323+ (const char __user *const __force_user *)sub_info->argv,
91324+ (const char __user *const __force_user *)sub_info->envp);
91325 if (!retval)
91326 return 0;
91327
91328@@ -260,6 +327,10 @@ static int call_helper(void *data)
91329
91330 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
91331 {
91332+#ifdef CONFIG_GRKERNSEC
91333+ kfree(info->path);
91334+ info->path = info->origpath;
91335+#endif
91336 if (info->cleanup)
91337 (*info->cleanup)(info);
91338 kfree(info);
91339@@ -300,7 +371,7 @@ static int wait_for_helper(void *data)
91340 *
91341 * Thus the __user pointer cast is valid here.
91342 */
91343- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91344+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91345
91346 /*
91347 * If ret is 0, either ____call_usermodehelper failed and the
91348@@ -539,7 +610,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
91349 goto out;
91350
91351 INIT_WORK(&sub_info->work, __call_usermodehelper);
91352+#ifdef CONFIG_GRKERNSEC
91353+ sub_info->origpath = path;
91354+ sub_info->path = kstrdup(path, gfp_mask);
91355+#else
91356 sub_info->path = path;
91357+#endif
91358 sub_info->argv = argv;
91359 sub_info->envp = envp;
91360
91361@@ -647,7 +723,7 @@ EXPORT_SYMBOL(call_usermodehelper);
91362 static int proc_cap_handler(struct ctl_table *table, int write,
91363 void __user *buffer, size_t *lenp, loff_t *ppos)
91364 {
91365- struct ctl_table t;
91366+ ctl_table_no_const t;
91367 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
91368 kernel_cap_t new_cap;
91369 int err, i;
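
The net effect of the MODHARDEN plumbing above: auto-load requests from unprivileged tasks are rewritten to carry an extra tag argument, letting policy (and the register_filesystem check added to kernel/module.c below) distinguish user-coaxed loads from explicit root ones. For a uid-1000 process triggering an auto-load, the usermode-helper argv built by call_modprobe() ends up shaped like this (module name and path illustrative):

char *argv[] = {
	"/sbin/modprobe",		/* modprobe_path */
	"-q",
	"--",
	"dccp",				/* requested module */
	"grsec_modharden_normal1000_",	/* module_param tag */
	NULL,
};
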
91370diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91371index 734e9a7..0a313b8 100644
91372--- a/kernel/kprobes.c
91373+++ b/kernel/kprobes.c
91374@@ -31,6 +31,9 @@
91375 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
91376 * <prasanna@in.ibm.com> added function-return probes.
91377 */
91378+#ifdef CONFIG_GRKERNSEC_HIDESYM
91379+#define __INCLUDED_BY_HIDESYM 1
91380+#endif
91381 #include <linux/kprobes.h>
91382 #include <linux/hash.h>
91383 #include <linux/init.h>
91384@@ -122,12 +125,12 @@ enum kprobe_slot_state {
91385
91386 static void *alloc_insn_page(void)
91387 {
91388- return module_alloc(PAGE_SIZE);
91389+ return module_alloc_exec(PAGE_SIZE);
91390 }
91391
91392 static void free_insn_page(void *page)
91393 {
91394- module_free(NULL, page);
91395+ module_free_exec(NULL, page);
91396 }
91397
91398 struct kprobe_insn_cache kprobe_insn_slots = {
91399@@ -2176,11 +2179,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
91400 kprobe_type = "k";
91401
91402 if (sym)
91403- seq_printf(pi, "%p %s %s+0x%x %s ",
91404+ seq_printf(pi, "%pK %s %s+0x%x %s ",
91405 p->addr, kprobe_type, sym, offset,
91406 (modname ? modname : " "));
91407 else
91408- seq_printf(pi, "%p %s %p ",
91409+ seq_printf(pi, "%pK %s %pK ",
91410 p->addr, kprobe_type, p->addr);
91411
91412 if (!pp)
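
report_probe() moves from %p to %pK, the kptr_restrict-aware pointer format: with kptr_restrict set, unprivileged readers of the kprobes listing see zeroed addresses rather than raw kernel pointers, closing one KASLR-bypass avenue. The lockdep_proc hunks below make the same substitution. Usage is a drop-in replacement:

/* prints the real address only to readers privileged under
 * kptr_restrict; all-zeroes otherwise */
seq_printf(m, "%pK %s\n", p->addr, kprobe_type);
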
91413diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
91414index 6683cce..daf8999 100644
91415--- a/kernel/ksysfs.c
91416+++ b/kernel/ksysfs.c
91417@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
91418 {
91419 if (count+1 > UEVENT_HELPER_PATH_LEN)
91420 return -ENOENT;
91421+ if (!capable(CAP_SYS_ADMIN))
91422+ return -EPERM;
91423 memcpy(uevent_helper, buf, count);
91424 uevent_helper[count] = '\0';
91425 if (count && uevent_helper[count-1] == '\n')
91426@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
91427 return count;
91428 }
91429
91430-static struct bin_attribute notes_attr = {
91431+static bin_attribute_no_const notes_attr __read_only = {
91432 .attr = {
91433 .name = "notes",
91434 .mode = S_IRUGO,
91435diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
91436index d24e433..fa04fb8 100644
91437--- a/kernel/locking/lockdep.c
91438+++ b/kernel/locking/lockdep.c
91439@@ -597,6 +597,10 @@ static int static_obj(void *obj)
91440 end = (unsigned long) &_end,
91441 addr = (unsigned long) obj;
91442
91443+#ifdef CONFIG_PAX_KERNEXEC
91444+ start = ktla_ktva(start);
91445+#endif
91446+
91447 /*
91448 * static variable?
91449 */
91450@@ -738,6 +742,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91451 if (!static_obj(lock->key)) {
91452 debug_locks_off();
91453 printk("INFO: trying to register non-static key.\n");
91454+ printk("lock:%pS key:%pS.\n", lock, lock->key);
91455 printk("the code is fine but needs lockdep annotation.\n");
91456 printk("turning off the locking correctness validator.\n");
91457 dump_stack();
91458@@ -3079,7 +3084,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
91459 if (!class)
91460 return 0;
91461 }
91462- atomic_inc((atomic_t *)&class->ops);
91463+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
91464 if (very_verbose(class)) {
91465 printk("\nacquire class [%p] %s", class->key, class->name);
91466 if (class->name_version > 1)
91467diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
91468index ef43ac4..2720dfa 100644
91469--- a/kernel/locking/lockdep_proc.c
91470+++ b/kernel/locking/lockdep_proc.c
91471@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
91472 return 0;
91473 }
91474
91475- seq_printf(m, "%p", class->key);
91476+ seq_printf(m, "%pK", class->key);
91477 #ifdef CONFIG_DEBUG_LOCKDEP
91478 seq_printf(m, " OPS:%8ld", class->ops);
91479 #endif
91480@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
91481
91482 list_for_each_entry(entry, &class->locks_after, entry) {
91483 if (entry->distance == 1) {
91484- seq_printf(m, " -> [%p] ", entry->class->key);
91485+ seq_printf(m, " -> [%pK] ", entry->class->key);
91486 print_name(m, entry->class);
91487 seq_puts(m, "\n");
91488 }
91489@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
91490 if (!class->key)
91491 continue;
91492
91493- seq_printf(m, "[%p] ", class->key);
91494+ seq_printf(m, "[%pK] ", class->key);
91495 print_name(m, class);
91496 seq_puts(m, "\n");
91497 }
91498@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91499 if (!i)
91500 seq_line(m, '-', 40-namelen, namelen);
91501
91502- snprintf(ip, sizeof(ip), "[<%p>]",
91503+ snprintf(ip, sizeof(ip), "[<%pK>]",
91504 (void *)class->contention_point[i]);
91505 seq_printf(m, "%40s %14lu %29s %pS\n",
91506 name, stats->contention_point[i],
91507@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91508 if (!i)
91509 seq_line(m, '-', 40-namelen, namelen);
91510
91511- snprintf(ip, sizeof(ip), "[<%p>]",
91512+ snprintf(ip, sizeof(ip), "[<%pK>]",
91513 (void *)class->contending_point[i]);
91514 seq_printf(m, "%40s %14lu %29s %pS\n",
91515 name, stats->contending_point[i],
91516diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
91517index be9ee15..39d6233 100644
91518--- a/kernel/locking/mcs_spinlock.c
91519+++ b/kernel/locking/mcs_spinlock.c
91520@@ -102,7 +102,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
91521
91522 prev = decode_cpu(old);
91523 node->prev = prev;
91524- ACCESS_ONCE(prev->next) = node;
91525+ ACCESS_ONCE_RW(prev->next) = node;
91526
91527 /*
91528 * Normally @prev is untouchable after the above store; because at that
91529@@ -174,8 +174,8 @@ unqueue:
91530 * it will wait in Step-A.
91531 */
91532
91533- ACCESS_ONCE(next->prev) = prev;
91534- ACCESS_ONCE(prev->next) = next;
91535+ ACCESS_ONCE_RW(next->prev) = prev;
91536+ ACCESS_ONCE_RW(prev->next) = next;
91537
91538 return false;
91539 }
91540@@ -197,13 +197,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
91541 node = this_cpu_ptr(&osq_node);
91542 next = xchg(&node->next, NULL);
91543 if (next) {
91544- ACCESS_ONCE(next->locked) = 1;
91545+ ACCESS_ONCE_RW(next->locked) = 1;
91546 return;
91547 }
91548
91549 next = osq_wait_next(lock, node, NULL);
91550 if (next)
91551- ACCESS_ONCE(next->locked) = 1;
91552+ ACCESS_ONCE_RW(next->locked) = 1;
91553 }
91554
91555 #endif
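
The _RW suffix exists because this patch constifies the plain accessor: ACCESS_ONCE() gains a const qualifier so incidental writes through it fail to compile on constified objects, and writers must opt in explicitly. Reconstructed shape of the compiler.h split (hedged; taken from the usual grsecurity definition rather than quoted from this patch):

#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))

Volatile semantics are identical in both forms; only writability differs.
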
91556diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
91557index 74356dc..48dd5e1 100644
91558--- a/kernel/locking/mcs_spinlock.h
91559+++ b/kernel/locking/mcs_spinlock.h
91560@@ -81,7 +81,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
91561 */
91562 return;
91563 }
91564- ACCESS_ONCE(prev->next) = node;
91565+ ACCESS_ONCE_RW(prev->next) = node;
91566
91567 /* Wait until the lock holder passes the lock down. */
91568 arch_mcs_spin_lock_contended(&node->locked);
91569diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
91570index 5cf6731..ce3bc5a 100644
91571--- a/kernel/locking/mutex-debug.c
91572+++ b/kernel/locking/mutex-debug.c
91573@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
91574 }
91575
91576 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91577- struct thread_info *ti)
91578+ struct task_struct *task)
91579 {
91580 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
91581
91582 /* Mark the current thread as blocked on the lock: */
91583- ti->task->blocked_on = waiter;
91584+ task->blocked_on = waiter;
91585 }
91586
91587 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91588- struct thread_info *ti)
91589+ struct task_struct *task)
91590 {
91591 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
91592- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
91593- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
91594- ti->task->blocked_on = NULL;
91595+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
91596+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
91597+ task->blocked_on = NULL;
91598
91599 list_del_init(&waiter->list);
91600 waiter->task = NULL;
91601diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
91602index 0799fd3..d06ae3b 100644
91603--- a/kernel/locking/mutex-debug.h
91604+++ b/kernel/locking/mutex-debug.h
91605@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
91606 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
91607 extern void debug_mutex_add_waiter(struct mutex *lock,
91608 struct mutex_waiter *waiter,
91609- struct thread_info *ti);
91610+ struct task_struct *task);
91611 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91612- struct thread_info *ti);
91613+ struct task_struct *task);
91614 extern void debug_mutex_unlock(struct mutex *lock);
91615 extern void debug_mutex_init(struct mutex *lock, const char *name,
91616 struct lock_class_key *key);
91617diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
91618index acca2c1..ddeaea8 100644
91619--- a/kernel/locking/mutex.c
91620+++ b/kernel/locking/mutex.c
91621@@ -490,7 +490,7 @@ slowpath:
91622 goto skip_wait;
91623
91624 debug_mutex_lock_common(lock, &waiter);
91625- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
91626+ debug_mutex_add_waiter(lock, &waiter, task);
91627
91628 /* add waiting tasks to the end of the waitqueue (FIFO): */
91629 list_add_tail(&waiter.list, &lock->wait_list);
91630@@ -534,7 +534,7 @@ slowpath:
91631 schedule_preempt_disabled();
91632 spin_lock_mutex(&lock->wait_lock, flags);
91633 }
91634- mutex_remove_waiter(lock, &waiter, current_thread_info());
91635+ mutex_remove_waiter(lock, &waiter, task);
91636 /* set it to 0 if there are no waiters left: */
91637 if (likely(list_empty(&lock->wait_list)))
91638 atomic_set(&lock->count, 0);
91639@@ -571,7 +571,7 @@ skip_wait:
91640 return 0;
91641
91642 err:
91643- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
91644+ mutex_remove_waiter(lock, &waiter, task);
91645 spin_unlock_mutex(&lock->wait_lock, flags);
91646 debug_mutex_free_waiter(&waiter);
91647 mutex_release(&lock->dep_map, 1, ip);
91648diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
91649index 1d96dd0..994ff19 100644
91650--- a/kernel/locking/rtmutex-tester.c
91651+++ b/kernel/locking/rtmutex-tester.c
91652@@ -22,7 +22,7 @@
91653 #define MAX_RT_TEST_MUTEXES 8
91654
91655 static spinlock_t rttest_lock;
91656-static atomic_t rttest_event;
91657+static atomic_unchecked_t rttest_event;
91658
91659 struct test_thread_data {
91660 int opcode;
91661@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91662
91663 case RTTEST_LOCKCONT:
91664 td->mutexes[td->opdata] = 1;
91665- td->event = atomic_add_return(1, &rttest_event);
91666+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91667 return 0;
91668
91669 case RTTEST_RESET:
91670@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91671 return 0;
91672
91673 case RTTEST_RESETEVENT:
91674- atomic_set(&rttest_event, 0);
91675+ atomic_set_unchecked(&rttest_event, 0);
91676 return 0;
91677
91678 default:
91679@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91680 return ret;
91681
91682 td->mutexes[id] = 1;
91683- td->event = atomic_add_return(1, &rttest_event);
91684+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91685 rt_mutex_lock(&mutexes[id]);
91686- td->event = atomic_add_return(1, &rttest_event);
91687+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91688 td->mutexes[id] = 4;
91689 return 0;
91690
91691@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91692 return ret;
91693
91694 td->mutexes[id] = 1;
91695- td->event = atomic_add_return(1, &rttest_event);
91696+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91697 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
91698- td->event = atomic_add_return(1, &rttest_event);
91699+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91700 td->mutexes[id] = ret ? 0 : 4;
91701 return ret ? -EINTR : 0;
91702
91703@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91704 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
91705 return ret;
91706
91707- td->event = atomic_add_return(1, &rttest_event);
91708+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91709 rt_mutex_unlock(&mutexes[id]);
91710- td->event = atomic_add_return(1, &rttest_event);
91711+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91712 td->mutexes[id] = 0;
91713 return 0;
91714
91715@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91716 break;
91717
91718 td->mutexes[dat] = 2;
91719- td->event = atomic_add_return(1, &rttest_event);
91720+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91721 break;
91722
91723 default:
91724@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91725 return;
91726
91727 td->mutexes[dat] = 3;
91728- td->event = atomic_add_return(1, &rttest_event);
91729+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91730 break;
91731
91732 case RTTEST_LOCKNOWAIT:
91733@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91734 return;
91735
91736 td->mutexes[dat] = 1;
91737- td->event = atomic_add_return(1, &rttest_event);
91738+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91739 return;
91740
91741 default:
91742diff --git a/kernel/module.c b/kernel/module.c
91743index 81e727c..a8ea6f9 100644
91744--- a/kernel/module.c
91745+++ b/kernel/module.c
91746@@ -61,6 +61,7 @@
91747 #include <linux/pfn.h>
91748 #include <linux/bsearch.h>
91749 #include <linux/fips.h>
91750+#include <linux/grsecurity.h>
91751 #include <uapi/linux/module.h>
91752 #include "module-internal.h"
91753
91754@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91755
91756 /* Bounds of module allocation, for speeding __module_address.
91757 * Protected by module_mutex. */
91758-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91759+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91760+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91761
91762 int register_module_notifier(struct notifier_block * nb)
91763 {
91764@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91765 return true;
91766
91767 list_for_each_entry_rcu(mod, &modules, list) {
91768- struct symsearch arr[] = {
91769+ struct symsearch modarr[] = {
91770 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91771 NOT_GPL_ONLY, false },
91772 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91773@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91774 if (mod->state == MODULE_STATE_UNFORMED)
91775 continue;
91776
91777- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91778+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91779 return true;
91780 }
91781 return false;
91782@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
91783 if (!pcpusec->sh_size)
91784 return 0;
91785
91786- if (align > PAGE_SIZE) {
91787+ if (align-1 >= PAGE_SIZE) {
91788 pr_warn("%s: per-cpu alignment %li > %li\n",
91789 mod->name, align, PAGE_SIZE);
91790 align = PAGE_SIZE;
91791@@ -1061,7 +1063,7 @@ struct module_attribute module_uevent =
91792 static ssize_t show_coresize(struct module_attribute *mattr,
91793 struct module_kobject *mk, char *buffer)
91794 {
91795- return sprintf(buffer, "%u\n", mk->mod->core_size);
91796+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
91797 }
91798
91799 static struct module_attribute modinfo_coresize =
91800@@ -1070,7 +1072,7 @@ static struct module_attribute modinfo_coresize =
91801 static ssize_t show_initsize(struct module_attribute *mattr,
91802 struct module_kobject *mk, char *buffer)
91803 {
91804- return sprintf(buffer, "%u\n", mk->mod->init_size);
91805+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
91806 }
91807
91808 static struct module_attribute modinfo_initsize =
91809@@ -1162,12 +1164,29 @@ static int check_version(Elf_Shdr *sechdrs,
91810 goto bad_version;
91811 }
91812
91813+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91814+ /*
91815+	 * avoid potentially printing gibberish on attempted load
91816+ * of a module randomized with a different seed
91817+ */
91818+ pr_warn("no symbol version for %s\n", symname);
91819+#else
91820 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
91821+#endif
91822 return 0;
91823
91824 bad_version:
91825+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91826+ /*
91827+	 * avoid potentially printing gibberish on attempted load
91828+ * of a module randomized with a different seed
91829+ */
91830+ printk("attempted module disagrees about version of symbol %s\n",
91831+ symname);
91832+#else
91833 printk("%s: disagrees about version of symbol %s\n",
91834 mod->name, symname);
91835+#endif
91836 return 0;
91837 }
91838
91839@@ -1283,7 +1302,7 @@ resolve_symbol_wait(struct module *mod,
91840 */
91841 #ifdef CONFIG_SYSFS
91842
91843-#ifdef CONFIG_KALLSYMS
91844+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91845 static inline bool sect_empty(const Elf_Shdr *sect)
91846 {
91847 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
91848@@ -1423,7 +1442,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
91849 {
91850 unsigned int notes, loaded, i;
91851 struct module_notes_attrs *notes_attrs;
91852- struct bin_attribute *nattr;
91853+ bin_attribute_no_const *nattr;
91854
91855 /* failed to create section attributes, so can't create notes */
91856 if (!mod->sect_attrs)
91857@@ -1535,7 +1554,7 @@ static void del_usage_links(struct module *mod)
91858 static int module_add_modinfo_attrs(struct module *mod)
91859 {
91860 struct module_attribute *attr;
91861- struct module_attribute *temp_attr;
91862+ module_attribute_no_const *temp_attr;
91863 int error = 0;
91864 int i;
91865
91866@@ -1756,21 +1775,21 @@ static void set_section_ro_nx(void *base,
91867
91868 static void unset_module_core_ro_nx(struct module *mod)
91869 {
91870- set_page_attributes(mod->module_core + mod->core_text_size,
91871- mod->module_core + mod->core_size,
91872+ set_page_attributes(mod->module_core_rw,
91873+ mod->module_core_rw + mod->core_size_rw,
91874 set_memory_x);
91875- set_page_attributes(mod->module_core,
91876- mod->module_core + mod->core_ro_size,
91877+ set_page_attributes(mod->module_core_rx,
91878+ mod->module_core_rx + mod->core_size_rx,
91879 set_memory_rw);
91880 }
91881
91882 static void unset_module_init_ro_nx(struct module *mod)
91883 {
91884- set_page_attributes(mod->module_init + mod->init_text_size,
91885- mod->module_init + mod->init_size,
91886+ set_page_attributes(mod->module_init_rw,
91887+ mod->module_init_rw + mod->init_size_rw,
91888 set_memory_x);
91889- set_page_attributes(mod->module_init,
91890- mod->module_init + mod->init_ro_size,
91891+ set_page_attributes(mod->module_init_rx,
91892+ mod->module_init_rx + mod->init_size_rx,
91893 set_memory_rw);
91894 }
91895
91896@@ -1783,14 +1802,14 @@ void set_all_modules_text_rw(void)
91897 list_for_each_entry_rcu(mod, &modules, list) {
91898 if (mod->state == MODULE_STATE_UNFORMED)
91899 continue;
91900- if ((mod->module_core) && (mod->core_text_size)) {
91901- set_page_attributes(mod->module_core,
91902- mod->module_core + mod->core_text_size,
91903+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91904+ set_page_attributes(mod->module_core_rx,
91905+ mod->module_core_rx + mod->core_size_rx,
91906 set_memory_rw);
91907 }
91908- if ((mod->module_init) && (mod->init_text_size)) {
91909- set_page_attributes(mod->module_init,
91910- mod->module_init + mod->init_text_size,
91911+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91912+ set_page_attributes(mod->module_init_rx,
91913+ mod->module_init_rx + mod->init_size_rx,
91914 set_memory_rw);
91915 }
91916 }
91917@@ -1806,14 +1825,14 @@ void set_all_modules_text_ro(void)
91918 list_for_each_entry_rcu(mod, &modules, list) {
91919 if (mod->state == MODULE_STATE_UNFORMED)
91920 continue;
91921- if ((mod->module_core) && (mod->core_text_size)) {
91922- set_page_attributes(mod->module_core,
91923- mod->module_core + mod->core_text_size,
91924+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91925+ set_page_attributes(mod->module_core_rx,
91926+ mod->module_core_rx + mod->core_size_rx,
91927 set_memory_ro);
91928 }
91929- if ((mod->module_init) && (mod->init_text_size)) {
91930- set_page_attributes(mod->module_init,
91931- mod->module_init + mod->init_text_size,
91932+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91933+ set_page_attributes(mod->module_init_rx,
91934+ mod->module_init_rx + mod->init_size_rx,
91935 set_memory_ro);
91936 }
91937 }
91938@@ -1864,16 +1883,19 @@ static void free_module(struct module *mod)
91939
91940 /* This may be NULL, but that's OK */
91941 unset_module_init_ro_nx(mod);
91942- module_free(mod, mod->module_init);
91943+ module_free(mod, mod->module_init_rw);
91944+ module_free_exec(mod, mod->module_init_rx);
91945 kfree(mod->args);
91946 percpu_modfree(mod);
91947
91948 /* Free lock-classes: */
91949- lockdep_free_key_range(mod->module_core, mod->core_size);
91950+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91951+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91952
91953 /* Finally, free the core (containing the module structure) */
91954 unset_module_core_ro_nx(mod);
91955- module_free(mod, mod->module_core);
91956+ module_free_exec(mod, mod->module_core_rx);
91957+ module_free(mod, mod->module_core_rw);
91958
91959 #ifdef CONFIG_MPU
91960 update_protections(current->mm);
91961@@ -1942,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91962 int ret = 0;
91963 const struct kernel_symbol *ksym;
91964
91965+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91966+ int is_fs_load = 0;
91967+ int register_filesystem_found = 0;
91968+ char *p;
91969+
91970+ p = strstr(mod->args, "grsec_modharden_fs");
91971+ if (p) {
91972+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
91973+ /* copy \0 as well */
91974+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
91975+ is_fs_load = 1;
91976+ }
91977+#endif
91978+
91979 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
91980 const char *name = info->strtab + sym[i].st_name;
91981
91982+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91983+ /* it's a real shame this will never get ripped and copied
91984+ upstream! ;(
91985+ */
91986+ if (is_fs_load && !strcmp(name, "register_filesystem"))
91987+ register_filesystem_found = 1;
91988+#endif
91989+
91990 switch (sym[i].st_shndx) {
91991 case SHN_COMMON:
91992 /* Ignore common symbols */
91993@@ -1969,7 +2013,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91994 ksym = resolve_symbol_wait(mod, info, name);
91995 /* Ok if resolved. */
91996 if (ksym && !IS_ERR(ksym)) {
91997+ pax_open_kernel();
91998 sym[i].st_value = ksym->value;
91999+ pax_close_kernel();
92000 break;
92001 }
92002
92003@@ -1988,11 +2034,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92004 secbase = (unsigned long)mod_percpu(mod);
92005 else
92006 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
92007+ pax_open_kernel();
92008 sym[i].st_value += secbase;
92009+ pax_close_kernel();
92010 break;
92011 }
92012 }
92013
92014+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92015+ if (is_fs_load && !register_filesystem_found) {
92016+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92017+ ret = -EPERM;
92018+ }
92019+#endif
92020+
92021 return ret;
92022 }
92023
92024@@ -2076,22 +2131,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
92025 || s->sh_entsize != ~0UL
92026 || strstarts(sname, ".init"))
92027 continue;
92028- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92029+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92030+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92031+ else
92032+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92033 pr_debug("\t%s\n", sname);
92034 }
92035- switch (m) {
92036- case 0: /* executable */
92037- mod->core_size = debug_align(mod->core_size);
92038- mod->core_text_size = mod->core_size;
92039- break;
92040- case 1: /* RO: text and ro-data */
92041- mod->core_size = debug_align(mod->core_size);
92042- mod->core_ro_size = mod->core_size;
92043- break;
92044- case 3: /* whole core */
92045- mod->core_size = debug_align(mod->core_size);
92046- break;
92047- }
92048 }
92049
92050 pr_debug("Init section allocation order:\n");
92051@@ -2105,23 +2150,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
92052 || s->sh_entsize != ~0UL
92053 || !strstarts(sname, ".init"))
92054 continue;
92055- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92056- | INIT_OFFSET_MASK);
92057+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92058+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92059+ else
92060+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92061+ s->sh_entsize |= INIT_OFFSET_MASK;
92062 pr_debug("\t%s\n", sname);
92063 }
92064- switch (m) {
92065- case 0: /* executable */
92066- mod->init_size = debug_align(mod->init_size);
92067- mod->init_text_size = mod->init_size;
92068- break;
92069- case 1: /* RO: text and ro-data */
92070- mod->init_size = debug_align(mod->init_size);
92071- mod->init_ro_size = mod->init_size;
92072- break;
92073- case 3: /* whole init */
92074- mod->init_size = debug_align(mod->init_size);
92075- break;
92076- }
92077 }
92078 }
92079
92080@@ -2294,7 +2329,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92081
92082 /* Put symbol section at end of init part of module. */
92083 symsect->sh_flags |= SHF_ALLOC;
92084- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92085+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92086 info->index.sym) | INIT_OFFSET_MASK;
92087 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
92088
92089@@ -2311,13 +2346,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92090 }
92091
92092 /* Append room for core symbols at end of core part. */
92093- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92094- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
92095- mod->core_size += strtab_size;
92096+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92097+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
92098+ mod->core_size_rx += strtab_size;
92099
92100 /* Put string table section at end of init part of module. */
92101 strsect->sh_flags |= SHF_ALLOC;
92102- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92103+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92104 info->index.str) | INIT_OFFSET_MASK;
92105 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
92106 }
92107@@ -2335,12 +2370,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92108 /* Make sure we get permanent strtab: don't use info->strtab. */
92109 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
92110
92111+ pax_open_kernel();
92112+
92113 /* Set types up while we still have access to sections. */
92114 for (i = 0; i < mod->num_symtab; i++)
92115 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
92116
92117- mod->core_symtab = dst = mod->module_core + info->symoffs;
92118- mod->core_strtab = s = mod->module_core + info->stroffs;
92119+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
92120+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
92121 src = mod->symtab;
92122 for (ndst = i = 0; i < mod->num_symtab; i++) {
92123 if (i == 0 ||
92124@@ -2352,6 +2389,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92125 }
92126 }
92127 mod->core_num_syms = ndst;
92128+
92129+ pax_close_kernel();
92130 }
92131 #else
92132 static inline void layout_symtab(struct module *mod, struct load_info *info)
92133@@ -2385,17 +2424,33 @@ void * __weak module_alloc(unsigned long size)
92134 return vmalloc_exec(size);
92135 }
92136
92137-static void *module_alloc_update_bounds(unsigned long size)
92138+static void *module_alloc_update_bounds_rw(unsigned long size)
92139 {
92140 void *ret = module_alloc(size);
92141
92142 if (ret) {
92143 mutex_lock(&module_mutex);
92144 /* Update module bounds. */
92145- if ((unsigned long)ret < module_addr_min)
92146- module_addr_min = (unsigned long)ret;
92147- if ((unsigned long)ret + size > module_addr_max)
92148- module_addr_max = (unsigned long)ret + size;
92149+ if ((unsigned long)ret < module_addr_min_rw)
92150+ module_addr_min_rw = (unsigned long)ret;
92151+ if ((unsigned long)ret + size > module_addr_max_rw)
92152+ module_addr_max_rw = (unsigned long)ret + size;
92153+ mutex_unlock(&module_mutex);
92154+ }
92155+ return ret;
92156+}
92157+
92158+static void *module_alloc_update_bounds_rx(unsigned long size)
92159+{
92160+ void *ret = module_alloc_exec(size);
92161+
92162+ if (ret) {
92163+ mutex_lock(&module_mutex);
92164+ /* Update module bounds. */
92165+ if ((unsigned long)ret < module_addr_min_rx)
92166+ module_addr_min_rx = (unsigned long)ret;
92167+ if ((unsigned long)ret + size > module_addr_max_rx)
92168+ module_addr_max_rx = (unsigned long)ret + size;
92169 mutex_unlock(&module_mutex);
92170 }
92171 return ret;
92172@@ -2652,7 +2707,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92173 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
92174
92175 if (info->index.sym == 0) {
92176+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92177+ /*
92178+	 * avoid potentially printing gibberish on attempted load
92179+ * of a module randomized with a different seed
92180+ */
92181+ pr_warn("module has no symbols (stripped?)\n");
92182+#else
92183 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
92184+#endif
92185 return ERR_PTR(-ENOEXEC);
92186 }
92187
92188@@ -2668,8 +2731,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92189 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92190 {
92191 const char *modmagic = get_modinfo(info, "vermagic");
92192+ const char *license = get_modinfo(info, "license");
92193 int err;
92194
92195+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92196+ if (!license || !license_is_gpl_compatible(license))
92197+ return -ENOEXEC;
92198+#endif
92199+
92200 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
92201 modmagic = NULL;
92202
92203@@ -2694,7 +2763,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92204 }
92205
92206 /* Set up license info based on the info section */
92207- set_license(mod, get_modinfo(info, "license"));
92208+ set_license(mod, license);
92209
92210 return 0;
92211 }
92212@@ -2788,7 +2857,7 @@ static int move_module(struct module *mod, struct load_info *info)
92213 void *ptr;
92214
92215 /* Do the allocs. */
92216- ptr = module_alloc_update_bounds(mod->core_size);
92217+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92218 /*
92219 * The pointer to this block is stored in the module structure
92220 * which is inside the block. Just mark it as not being a
92221@@ -2798,11 +2867,11 @@ static int move_module(struct module *mod, struct load_info *info)
92222 if (!ptr)
92223 return -ENOMEM;
92224
92225- memset(ptr, 0, mod->core_size);
92226- mod->module_core = ptr;
92227+ memset(ptr, 0, mod->core_size_rw);
92228+ mod->module_core_rw = ptr;
92229
92230- if (mod->init_size) {
92231- ptr = module_alloc_update_bounds(mod->init_size);
92232+ if (mod->init_size_rw) {
92233+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92234 /*
92235 * The pointer to this block is stored in the module structure
92236 * which is inside the block. This block doesn't need to be
92237@@ -2811,13 +2880,45 @@ static int move_module(struct module *mod, struct load_info *info)
92238 */
92239 kmemleak_ignore(ptr);
92240 if (!ptr) {
92241- module_free(mod, mod->module_core);
92242+ module_free(mod, mod->module_core_rw);
92243 return -ENOMEM;
92244 }
92245- memset(ptr, 0, mod->init_size);
92246- mod->module_init = ptr;
92247+ memset(ptr, 0, mod->init_size_rw);
92248+ mod->module_init_rw = ptr;
92249 } else
92250- mod->module_init = NULL;
92251+ mod->module_init_rw = NULL;
92252+
92253+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92254+ kmemleak_not_leak(ptr);
92255+ if (!ptr) {
92256+ if (mod->module_init_rw)
92257+ module_free(mod, mod->module_init_rw);
92258+ module_free(mod, mod->module_core_rw);
92259+ return -ENOMEM;
92260+ }
92261+
92262+ pax_open_kernel();
92263+ memset(ptr, 0, mod->core_size_rx);
92264+ pax_close_kernel();
92265+ mod->module_core_rx = ptr;
92266+
92267+ if (mod->init_size_rx) {
92268+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92269+ kmemleak_ignore(ptr);
92270+		if (!ptr) {
92271+ module_free_exec(mod, mod->module_core_rx);
92272+ if (mod->module_init_rw)
92273+ module_free(mod, mod->module_init_rw);
92274+ module_free(mod, mod->module_core_rw);
92275+ return -ENOMEM;
92276+ }
92277+
92278+ pax_open_kernel();
92279+ memset(ptr, 0, mod->init_size_rx);
92280+ pax_close_kernel();
92281+ mod->module_init_rx = ptr;
92282+ } else
92283+ mod->module_init_rx = NULL;
92284
92285 /* Transfer each section which specifies SHF_ALLOC */
92286 pr_debug("final section addresses:\n");
92287@@ -2828,16 +2929,45 @@ static int move_module(struct module *mod, struct load_info *info)
92288 if (!(shdr->sh_flags & SHF_ALLOC))
92289 continue;
92290
92291- if (shdr->sh_entsize & INIT_OFFSET_MASK)
92292- dest = mod->module_init
92293- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92294- else
92295- dest = mod->module_core + shdr->sh_entsize;
92296+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
92297+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92298+ dest = mod->module_init_rw
92299+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92300+ else
92301+ dest = mod->module_init_rx
92302+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92303+ } else {
92304+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92305+ dest = mod->module_core_rw + shdr->sh_entsize;
92306+ else
92307+ dest = mod->module_core_rx + shdr->sh_entsize;
92308+ }
92309+
92310+ if (shdr->sh_type != SHT_NOBITS) {
92311+
92312+#ifdef CONFIG_PAX_KERNEXEC
92313+#ifdef CONFIG_X86_64
92314+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
92315+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92316+#endif
92317+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
92318+ pax_open_kernel();
92319+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92320+ pax_close_kernel();
92321+ } else
92322+#endif
92323
92324- if (shdr->sh_type != SHT_NOBITS)
92325 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92326+ }
92327 /* Update sh_addr to point to copy in image. */
92328- shdr->sh_addr = (unsigned long)dest;
92329+
92330+#ifdef CONFIG_PAX_KERNEXEC
92331+ if (shdr->sh_flags & SHF_EXECINSTR)
92332+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
92333+ else
92334+#endif
92335+
92336+ shdr->sh_addr = (unsigned long)dest;
92337 pr_debug("\t0x%lx %s\n",
92338 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
92339 }
92340@@ -2894,12 +3024,12 @@ static void flush_module_icache(const struct module *mod)
92341 * Do it before processing of module parameters, so the module
92342 * can provide parameter accessor functions of its own.
92343 */
92344- if (mod->module_init)
92345- flush_icache_range((unsigned long)mod->module_init,
92346- (unsigned long)mod->module_init
92347- + mod->init_size);
92348- flush_icache_range((unsigned long)mod->module_core,
92349- (unsigned long)mod->module_core + mod->core_size);
92350+ if (mod->module_init_rx)
92351+ flush_icache_range((unsigned long)mod->module_init_rx,
92352+ (unsigned long)mod->module_init_rx
92353+ + mod->init_size_rx);
92354+ flush_icache_range((unsigned long)mod->module_core_rx,
92355+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
92356
92357 set_fs(old_fs);
92358 }
92359@@ -2956,8 +3086,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
92360 static void module_deallocate(struct module *mod, struct load_info *info)
92361 {
92362 percpu_modfree(mod);
92363- module_free(mod, mod->module_init);
92364- module_free(mod, mod->module_core);
92365+ module_free_exec(mod, mod->module_init_rx);
92366+ module_free_exec(mod, mod->module_core_rx);
92367+ module_free(mod, mod->module_init_rw);
92368+ module_free(mod, mod->module_core_rw);
92369 }
92370
92371 int __weak module_finalize(const Elf_Ehdr *hdr,
92372@@ -2970,7 +3102,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
92373 static int post_relocation(struct module *mod, const struct load_info *info)
92374 {
92375 /* Sort exception table now relocations are done. */
92376+ pax_open_kernel();
92377 sort_extable(mod->extable, mod->extable + mod->num_exentries);
92378+ pax_close_kernel();
92379
92380 /* Copy relocated percpu area over. */
92381 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
92382@@ -3079,11 +3213,12 @@ static int do_init_module(struct module *mod)
92383 mod->strtab = mod->core_strtab;
92384 #endif
92385 unset_module_init_ro_nx(mod);
92386- module_free(mod, mod->module_init);
92387- mod->module_init = NULL;
92388- mod->init_size = 0;
92389- mod->init_ro_size = 0;
92390- mod->init_text_size = 0;
92391+ module_free(mod, mod->module_init_rw);
92392+ module_free_exec(mod, mod->module_init_rx);
92393+ mod->module_init_rw = NULL;
92394+ mod->module_init_rx = NULL;
92395+ mod->init_size_rw = 0;
92396+ mod->init_size_rx = 0;
92397 mutex_unlock(&module_mutex);
92398 wake_up_all(&module_wq);
92399
92400@@ -3151,16 +3286,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
92401 module_bug_finalize(info->hdr, info->sechdrs, mod);
92402
92403 /* Set RO and NX regions for core */
92404- set_section_ro_nx(mod->module_core,
92405- mod->core_text_size,
92406- mod->core_ro_size,
92407- mod->core_size);
92408+ set_section_ro_nx(mod->module_core_rx,
92409+ mod->core_size_rx,
92410+ mod->core_size_rx,
92411+ mod->core_size_rx);
92412
92413 /* Set RO and NX regions for init */
92414- set_section_ro_nx(mod->module_init,
92415- mod->init_text_size,
92416- mod->init_ro_size,
92417- mod->init_size);
92418+ set_section_ro_nx(mod->module_init_rx,
92419+ mod->init_size_rx,
92420+ mod->init_size_rx,
92421+ mod->init_size_rx);
92422
92423 /* Mark state as coming so strong_try_module_get() ignores us,
92424 * but kallsyms etc. can see us. */
92425@@ -3244,9 +3379,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
92426 if (err)
92427 goto free_unload;
92428
92429+ /* Now copy in args */
92430+ mod->args = strndup_user(uargs, ~0UL >> 1);
92431+ if (IS_ERR(mod->args)) {
92432+ err = PTR_ERR(mod->args);
92433+ goto free_unload;
92434+ }
92435+
92436 /* Set up MODINFO_ATTR fields */
92437 setup_modinfo(mod, info);
92438
92439+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92440+ {
92441+ char *p, *p2;
92442+
92444+		if (strstr(mod->args, "grsec_modharden_netdev")) {
92444+			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
92445+ err = -EPERM;
92446+ goto free_modinfo;
92447+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
92448+ p += sizeof("grsec_modharden_normal") - 1;
92449+ p2 = strstr(p, "_");
92450+ if (p2) {
92451+ *p2 = '\0';
92452+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
92453+ *p2 = '_';
92454+ }
92455+ err = -EPERM;
92456+ goto free_modinfo;
92457+ }
92458+ }
92459+#endif
92460+
92461 /* Fix up syms, so that st_value is a pointer to location. */
92462 err = simplify_symbols(mod, info);
92463 if (err < 0)
92464@@ -3262,13 +3426,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
92465
92466 flush_module_icache(mod);
92467
92468- /* Now copy in args */
92469- mod->args = strndup_user(uargs, ~0UL >> 1);
92470- if (IS_ERR(mod->args)) {
92471- err = PTR_ERR(mod->args);
92472- goto free_arch_cleanup;
92473- }
92474-
92475 dynamic_debug_setup(info->debug, info->num_debug);
92476
92477 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
92478@@ -3311,11 +3468,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
92479 ddebug_cleanup:
92480 dynamic_debug_remove(info->debug);
92481 synchronize_sched();
92482- kfree(mod->args);
92483- free_arch_cleanup:
92484 module_arch_cleanup(mod);
92485 free_modinfo:
92486 free_modinfo(mod);
92487+ kfree(mod->args);
92488 free_unload:
92489 module_unload_free(mod);
92490 unlink_mod:
92491@@ -3398,10 +3554,16 @@ static const char *get_ksymbol(struct module *mod,
92492 unsigned long nextval;
92493
92494 /* At worse, next value is at end of module */
92495- if (within_module_init(addr, mod))
92496- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92497+ if (within_module_init_rx(addr, mod))
92498+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92499+ else if (within_module_init_rw(addr, mod))
92500+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92501+ else if (within_module_core_rx(addr, mod))
92502+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92503+ else if (within_module_core_rw(addr, mod))
92504+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92505 else
92506- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92507+ return NULL;
92508
92509 /* Scan for closest preceding symbol, and next symbol. (ELF
92510 starts real symbols at 1). */
92511@@ -3652,7 +3814,7 @@ static int m_show(struct seq_file *m, void *p)
92512 return 0;
92513
92514 seq_printf(m, "%s %u",
92515- mod->name, mod->init_size + mod->core_size);
92516+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92517 print_unload_info(m, mod);
92518
92519 /* Informative for users. */
92520@@ -3661,7 +3823,7 @@ static int m_show(struct seq_file *m, void *p)
92521 mod->state == MODULE_STATE_COMING ? "Loading":
92522 "Live");
92523 /* Used by oprofile and other similar tools. */
92524- seq_printf(m, " 0x%pK", mod->module_core);
92525+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
92526
92527 /* Taints info */
92528 if (mod->taints)
92529@@ -3697,7 +3859,17 @@ static const struct file_operations proc_modules_operations = {
92530
92531 static int __init proc_modules_init(void)
92532 {
92533+#ifndef CONFIG_GRKERNSEC_HIDESYM
92534+#ifdef CONFIG_GRKERNSEC_PROC_USER
92535+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92536+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92537+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92538+#else
92539 proc_create("modules", 0, NULL, &proc_modules_operations);
92540+#endif
92541+#else
92542+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92543+#endif
92544 return 0;
92545 }
92546 module_init(proc_modules_init);
92547@@ -3758,14 +3930,14 @@ struct module *__module_address(unsigned long addr)
92548 {
92549 struct module *mod;
92550
92551- if (addr < module_addr_min || addr > module_addr_max)
92552+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92553+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92554 return NULL;
92555
92556 list_for_each_entry_rcu(mod, &modules, list) {
92557 if (mod->state == MODULE_STATE_UNFORMED)
92558 continue;
92559- if (within_module_core(addr, mod)
92560- || within_module_init(addr, mod))
92561+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
92562 return mod;
92563 }
92564 return NULL;
92565@@ -3800,11 +3972,20 @@ bool is_module_text_address(unsigned long addr)
92566 */
92567 struct module *__module_text_address(unsigned long addr)
92568 {
92569- struct module *mod = __module_address(addr);
92570+ struct module *mod;
92571+
92572+#ifdef CONFIG_X86_32
92573+ addr = ktla_ktva(addr);
92574+#endif
92575+
92576+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92577+ return NULL;
92578+
92579+ mod = __module_address(addr);
92580+
92581 if (mod) {
92582 /* Make sure it's within the text section. */
92583- if (!within(addr, mod->module_init, mod->init_text_size)
92584- && !within(addr, mod->module_core, mod->core_text_size))
92585+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92586 mod = NULL;
92587 }
92588 return mod;
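
A minimal userspace sketch of the idea behind the module.c changes above, assuming an x86-64 Linux host: the patch splits every module into a _rw mapping (data) and an _rx mapping (text/rodata) so no module memory is ever writable and executable at once, with pax_open_kernel()/pax_close_kernel() bracketing the rare writes into the rx half. Nothing below is kernel API; mmap/mprotect merely stand in for module_alloc()/module_alloc_exec().

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* "rw" half: module data, readable and writable, never executable */
	unsigned char *rw = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* "rx" half: staged writable, then sealed to read+exec */
	unsigned char *rx = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (rw == MAP_FAILED || rx == MAP_FAILED)
		return 1;

	rw[0] = 42;					/* data stays writable */

	/* x86-64 machine code: mov eax, 42; ret */
	static const unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
	memcpy(rx, code, sizeof(code));

	/* analogue of pax_close_kernel(): drop write before allowing exec */
	if (mprotect(rx, 4096, PROT_READ | PROT_EXEC))
		return 1;

	int (*fn)(void) = (int (*)(void))rx;
	printf("rx returned %d, rw[0] = %d\n", fn(), rw[0]);
	return 0;
}
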
92589diff --git a/kernel/notifier.c b/kernel/notifier.c
92590index 4803da6..1c5eea6 100644
92591--- a/kernel/notifier.c
92592+++ b/kernel/notifier.c
92593@@ -5,6 +5,7 @@
92594 #include <linux/rcupdate.h>
92595 #include <linux/vmalloc.h>
92596 #include <linux/reboot.h>
92597+#include <linux/mm.h>
92598
92599 /*
92600 * Notifier list for kernel code which wants to be called
92601@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
92602 while ((*nl) != NULL) {
92603 if (n->priority > (*nl)->priority)
92604 break;
92605- nl = &((*nl)->next);
92606+ nl = (struct notifier_block **)&((*nl)->next);
92607 }
92608- n->next = *nl;
92609+ pax_open_kernel();
92610+ *(const void **)&n->next = *nl;
92611 rcu_assign_pointer(*nl, n);
92612+ pax_close_kernel();
92613 return 0;
92614 }
92615
92616@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
92617 return 0;
92618 if (n->priority > (*nl)->priority)
92619 break;
92620- nl = &((*nl)->next);
92621+ nl = (struct notifier_block **)&((*nl)->next);
92622 }
92623- n->next = *nl;
92624+ pax_open_kernel();
92625+ *(const void **)&n->next = *nl;
92626 rcu_assign_pointer(*nl, n);
92627+ pax_close_kernel();
92628 return 0;
92629 }
92630
92631@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
92632 {
92633 while ((*nl) != NULL) {
92634 if ((*nl) == n) {
92635+ pax_open_kernel();
92636 rcu_assign_pointer(*nl, n->next);
92637+ pax_close_kernel();
92638 return 0;
92639 }
92640- nl = &((*nl)->next);
92641+ nl = (struct notifier_block **)&((*nl)->next);
92642 }
92643 return -ENOENT;
92644 }
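
Stripped of the pax_open_kernel() bracketing and the const-casts, notifier_chain_register() is a priority-ordered insert into a singly linked list. A self-contained sketch of just that algorithm, where struct nb is a made-up stand-in for notifier_block:

#include <stdio.h>

struct nb {
	int priority;
	struct nb *next;
};

static void chain_register(struct nb **nl, struct nb *n)
{
	/* walk until the first node with strictly lower priority */
	while (*nl != NULL && (*nl)->priority >= n->priority)
		nl = &(*nl)->next;
	n->next = *nl;
	*nl = n;
}

int main(void)
{
	struct nb a = { .priority = 10 }, b = { .priority = 30 }, c = { .priority = 20 };
	struct nb *head = NULL;

	chain_register(&head, &a);
	chain_register(&head, &b);
	chain_register(&head, &c);

	for (struct nb *p = head; p; p = p->next)
		printf("%d\n", p->priority);	/* prints 30, 20, 10 */
	return 0;
}
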
92645diff --git a/kernel/padata.c b/kernel/padata.c
92646index 161402f..598814c 100644
92647--- a/kernel/padata.c
92648+++ b/kernel/padata.c
92649@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
92650 * seq_nr mod. number of cpus in use.
92651 */
92652
92653- seq_nr = atomic_inc_return(&pd->seq_nr);
92654+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
92655 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
92656
92657 return padata_index_to_cpu(pd, cpu_index);
92658@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
92659 padata_init_pqueues(pd);
92660 padata_init_squeues(pd);
92661 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
92662- atomic_set(&pd->seq_nr, -1);
92663+ atomic_set_unchecked(&pd->seq_nr, -1);
92664 atomic_set(&pd->reorder_objects, 0);
92665 atomic_set(&pd->refcnt, 0);
92666 pd->pinst = pinst;
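
The padata change is purely a type swap: seq_nr is a statistics-style counter whose wraparound is expected, so it moves to the PaX "unchecked" atomic type. The underlying dispatch is an atomic fetch-and-increment reduced modulo the CPU count; a userspace analogue with C11 atomics (the CPU count of 4 is an assumption for the demo):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq_nr;

static unsigned int pick_cpu(unsigned int ncpus)
{
	/* fetch-and-increment may wrap; the modulo keeps the index valid */
	unsigned int seq = atomic_fetch_add(&seq_nr, 1);
	return seq % ncpus;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("job %d -> cpu %u\n", i, pick_cpu(4));
	return 0;
}
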
92667diff --git a/kernel/panic.c b/kernel/panic.c
92668index 62e16ce..9db5047b 100644
92669--- a/kernel/panic.c
92670+++ b/kernel/panic.c
92671@@ -53,7 +53,7 @@ EXPORT_SYMBOL(panic_blink);
92672 /*
92673 * Stop ourself in panic -- architecture code may override this
92674 */
92675-void __weak panic_smp_self_stop(void)
92676+void __weak __noreturn panic_smp_self_stop(void)
92677 {
92678 while (1)
92679 cpu_relax();
92680@@ -420,7 +420,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
92681 disable_trace_on_warning();
92682
92683 pr_warn("------------[ cut here ]------------\n");
92684- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
92685+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
92686 raw_smp_processor_id(), current->pid, file, line, caller);
92687
92688 if (args)
92689@@ -474,7 +474,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92690 */
92691 __visible void __stack_chk_fail(void)
92692 {
92693- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92694+ dump_stack();
92695+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92696 __builtin_return_address(0));
92697 }
92698 EXPORT_SYMBOL(__stack_chk_fail);
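
__stack_chk_fail() is what -fstack-protector instrumentation calls once a function's canary has been clobbered; the hunk above adds a dump_stack() and the %pA pointer format before panicking. A hand-written, simplified picture of what the compiler inserts (real canaries are placed by the compiler and their stack layout is implementation-defined, so treat this purely as illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long stack_guard = 0xdeadbeefcafef00dUL;

static void stack_chk_fail(void)
{
	fprintf(stderr, "stack-protector: stack corrupted\n");
	abort();
}

static void copy_name(const char *src)
{
	unsigned long canary = stack_guard;	/* prologue: plant the canary */
	char buf[8];

	strcpy(buf, src);			/* the potential overflow target */

	if (canary != stack_guard)		/* epilogue: verify before return */
		stack_chk_fail();
}

int main(void)
{
	copy_name("ok");			/* fits, canary survives */
	return 0;
}
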
92699diff --git a/kernel/pid.c b/kernel/pid.c
92700index 9b9a266..c20ef80 100644
92701--- a/kernel/pid.c
92702+++ b/kernel/pid.c
92703@@ -33,6 +33,7 @@
92704 #include <linux/rculist.h>
92705 #include <linux/bootmem.h>
92706 #include <linux/hash.h>
92707+#include <linux/security.h>
92708 #include <linux/pid_namespace.h>
92709 #include <linux/init_task.h>
92710 #include <linux/syscalls.h>
92711@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
92712
92713 int pid_max = PID_MAX_DEFAULT;
92714
92715-#define RESERVED_PIDS 300
92716+#define RESERVED_PIDS 500
92717
92718 int pid_max_min = RESERVED_PIDS + 1;
92719 int pid_max_max = PID_MAX_LIMIT;
92720@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
92721 */
92722 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
92723 {
92724+ struct task_struct *task;
92725+
92726 rcu_lockdep_assert(rcu_read_lock_held(),
92727 "find_task_by_pid_ns() needs rcu_read_lock()"
92728 " protection");
92729- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92730+
92731+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92732+
92733+ if (gr_pid_is_chrooted(task))
92734+ return NULL;
92735+
92736+ return task;
92737 }
92738
92739 struct task_struct *find_task_by_vpid(pid_t vnr)
92740@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
92741 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
92742 }
92743
92744+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
92745+{
92747+	rcu_lockdep_assert(rcu_read_lock_held(),
92747+			   "find_task_by_vpid_unrestricted() needs rcu_read_lock()"
92748+ " protection");
92749+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
92750+}
92751+
92752 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
92753 {
92754 struct pid *pid;
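
The pid.c hunk routes every find_task_by_pid_ns() hit through a policy hook (gr_pid_is_chrooted()) and keeps an _unrestricted variant for callers that must see everything. The shape of that filter-plus-bypass pattern, with a toy array standing in for the pid hash and outside_chroot an invented stand-in for the real predicate:

#include <stdbool.h>
#include <stdio.h>

struct task {
	int pid;
	bool outside_chroot;	/* stand-in for the policy's criterion */
};

static struct task table[] = { { 1, true }, { 100, false } };

static struct task *find_task_unrestricted(int pid)
{
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].pid == pid)
			return &table[i];
	return NULL;
}

static struct task *find_task(int pid)
{
	struct task *t = find_task_unrestricted(pid);

	/* policy hook: hide tasks the caller must not see */
	if (t && t->outside_chroot)
		return NULL;
	return t;
}

int main(void)
{
	printf("filtered lookup of pid 1: %d\n", find_task(1) != NULL);		/* 0 */
	printf("unrestricted lookup of pid 1: %d\n", find_task_unrestricted(1) != NULL); /* 1 */
	return 0;
}
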
92755diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
92756index db95d8e..a0ca23f 100644
92757--- a/kernel/pid_namespace.c
92758+++ b/kernel/pid_namespace.c
92759@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
92760 void __user *buffer, size_t *lenp, loff_t *ppos)
92761 {
92762 struct pid_namespace *pid_ns = task_active_pid_ns(current);
92763- struct ctl_table tmp = *table;
92764+ ctl_table_no_const tmp = *table;
92765
92766 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
92767 return -EPERM;
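
ctl_table_no_const exists because the constification plugin makes struct ctl_table objects read-only; a handler that needs a tweaked copy clones the template into a mutable stack local and passes that down, leaving the original untouched. The idiom in miniature (struct ctl_entry is an invented stand-in):

#include <stdio.h>

struct ctl_entry {
	const char *name;
	int maxval;
};

/* the "constified" template -- never written after initialization */
static const struct ctl_entry template = { "pid_max", 32768 };

static void handle(const struct ctl_entry *e)
{
	printf("%s <= %d\n", e->name, e->maxval);
}

int main(void)
{
	struct ctl_entry tmp = template;	/* mutable stack clone */

	tmp.maxval = 4096;			/* per-call adjustment */
	handle(&tmp);				/* the template stays intact */
	return 0;
}
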
92768diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
92769index 3b89464..5e38379 100644
92770--- a/kernel/posix-cpu-timers.c
92771+++ b/kernel/posix-cpu-timers.c
92772@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
92773
92774 static __init int init_posix_cpu_timers(void)
92775 {
92776- struct k_clock process = {
92777+ static struct k_clock process = {
92778 .clock_getres = process_cpu_clock_getres,
92779 .clock_get = process_cpu_clock_get,
92780 .timer_create = process_cpu_timer_create,
92781 .nsleep = process_cpu_nsleep,
92782 .nsleep_restart = process_cpu_nsleep_restart,
92783 };
92784- struct k_clock thread = {
92785+ static struct k_clock thread = {
92786 .clock_getres = thread_cpu_clock_getres,
92787 .clock_get = thread_cpu_clock_get,
92788 .timer_create = thread_cpu_timer_create,
92789diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
92790index 424c2d4..679242f 100644
92791--- a/kernel/posix-timers.c
92792+++ b/kernel/posix-timers.c
92793@@ -43,6 +43,7 @@
92794 #include <linux/hash.h>
92795 #include <linux/posix-clock.h>
92796 #include <linux/posix-timers.h>
92797+#include <linux/grsecurity.h>
92798 #include <linux/syscalls.h>
92799 #include <linux/wait.h>
92800 #include <linux/workqueue.h>
92801@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
92802 * which we beg off on and pass to do_sys_settimeofday().
92803 */
92804
92805-static struct k_clock posix_clocks[MAX_CLOCKS];
92806+static struct k_clock *posix_clocks[MAX_CLOCKS];
92807
92808 /*
92809 * These ones are defined below.
92810@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
92811 */
92812 static __init int init_posix_timers(void)
92813 {
92814- struct k_clock clock_realtime = {
92815+ static struct k_clock clock_realtime = {
92816 .clock_getres = hrtimer_get_res,
92817 .clock_get = posix_clock_realtime_get,
92818 .clock_set = posix_clock_realtime_set,
92819@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
92820 .timer_get = common_timer_get,
92821 .timer_del = common_timer_del,
92822 };
92823- struct k_clock clock_monotonic = {
92824+ static struct k_clock clock_monotonic = {
92825 .clock_getres = hrtimer_get_res,
92826 .clock_get = posix_ktime_get_ts,
92827 .nsleep = common_nsleep,
92828@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
92829 .timer_get = common_timer_get,
92830 .timer_del = common_timer_del,
92831 };
92832- struct k_clock clock_monotonic_raw = {
92833+ static struct k_clock clock_monotonic_raw = {
92834 .clock_getres = hrtimer_get_res,
92835 .clock_get = posix_get_monotonic_raw,
92836 };
92837- struct k_clock clock_realtime_coarse = {
92838+ static struct k_clock clock_realtime_coarse = {
92839 .clock_getres = posix_get_coarse_res,
92840 .clock_get = posix_get_realtime_coarse,
92841 };
92842- struct k_clock clock_monotonic_coarse = {
92843+ static struct k_clock clock_monotonic_coarse = {
92844 .clock_getres = posix_get_coarse_res,
92845 .clock_get = posix_get_monotonic_coarse,
92846 };
92847- struct k_clock clock_tai = {
92848+ static struct k_clock clock_tai = {
92849 .clock_getres = hrtimer_get_res,
92850 .clock_get = posix_get_tai,
92851 .nsleep = common_nsleep,
92852@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
92853 .timer_get = common_timer_get,
92854 .timer_del = common_timer_del,
92855 };
92856- struct k_clock clock_boottime = {
92857+ static struct k_clock clock_boottime = {
92858 .clock_getres = hrtimer_get_res,
92859 .clock_get = posix_get_boottime,
92860 .nsleep = common_nsleep,
92861@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
92862 return;
92863 }
92864
92865- posix_clocks[clock_id] = *new_clock;
92866+ posix_clocks[clock_id] = new_clock;
92867 }
92868 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
92869
92870@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
92871 return (id & CLOCKFD_MASK) == CLOCKFD ?
92872 &clock_posix_dynamic : &clock_posix_cpu;
92873
92874- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
92875+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
92876 return NULL;
92877- return &posix_clocks[id];
92878+ return posix_clocks[id];
92879 }
92880
92881 static int common_timer_create(struct k_itimer *new_timer)
92882@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
92883 struct k_clock *kc = clockid_to_kclock(which_clock);
92884 struct k_itimer *new_timer;
92885 int error, new_timer_id;
92886- sigevent_t event;
92887+ sigevent_t event = { };
92888 int it_id_set = IT_ID_NOT_SET;
92889
92890 if (!kc)
92891@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
92892 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
92893 return -EFAULT;
92894
92895+	/* only the CLOCK_REALTIME clock can be set; all other clocks
92896+	   have their clock_set fptr set to a dummy nosettime function.
92897+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
92898+	   call common_clock_set, which calls do_sys_settimeofday, which
92899+	   we hook.
92900+	 */
92901+
92902 return kc->clock_set(which_clock, &new_tp);
92903 }
92904
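
The posix-timers and posix-cpu-timers hunks share one motif: posix_clocks[] becomes an array of pointers, and each function-local struct k_clock initializer gains static. Both follow from storing a pointer rather than a copy: the pointed-to object must outlive the registering function, and a NULL slot now doubles as "not registered". In miniature (all names below are invented stand-ins):

#include <stdio.h>

struct k_clock {
	int (*getres)(void);
};

#define MAX_CLOCKS 4
static struct k_clock *clocks[MAX_CLOCKS];

static void register_clock(int id, struct k_clock *kc)
{
	if (id >= 0 && id < MAX_CLOCKS)
		clocks[id] = kc;	/* store the pointer, no struct copy */
}

static struct k_clock *lookup(int id)
{
	if (id < 0 || id >= MAX_CLOCKS || !clocks[id] || !clocks[id]->getres)
		return NULL;		/* NULL slot means unregistered */
	return clocks[id];
}

static int realtime_getres(void) { return 1; }

static void init_clocks(void)
{
	/* static: the object survives init_clocks() returning */
	static struct k_clock clock_realtime = { .getres = realtime_getres };

	register_clock(0, &clock_realtime);
}

int main(void)
{
	init_clocks();
	printf("clock 0 registered: %d\n", lookup(0) != NULL);	/* 1 */
	printf("clock 1 registered: %d\n", lookup(1) != NULL);	/* 0 */
	return 0;
}
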
92905diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
92906index 9a83d78..128bfc0 100644
92907--- a/kernel/power/Kconfig
92908+++ b/kernel/power/Kconfig
92909@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
92910 config HIBERNATION
92911 bool "Hibernation (aka 'suspend to disk')"
92912 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
92913+ depends on !GRKERNSEC_KMEM
92914+ depends on !PAX_MEMORY_SANITIZE
92915 select HIBERNATE_CALLBACKS
92916 select LZO_COMPRESS
92917 select LZO_DECOMPRESS
92918diff --git a/kernel/power/process.c b/kernel/power/process.c
92919index 4ee194e..925778f 100644
92920--- a/kernel/power/process.c
92921+++ b/kernel/power/process.c
92922@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
92923 unsigned int elapsed_msecs;
92924 bool wakeup = false;
92925 int sleep_usecs = USEC_PER_MSEC;
92926+ bool timedout = false;
92927
92928 do_gettimeofday(&start);
92929
92930@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
92931
92932 while (true) {
92933 todo = 0;
92934+ if (time_after(jiffies, end_time))
92935+ timedout = true;
92936 read_lock(&tasklist_lock);
92937 do_each_thread(g, p) {
92938 if (p == current || !freeze_task(p))
92939 continue;
92940
92941- if (!freezer_should_skip(p))
92942+ if (!freezer_should_skip(p)) {
92943 todo++;
92944+ if (timedout) {
92945+ printk(KERN_ERR "Task refusing to freeze:\n");
92946+ sched_show_task(p);
92947+ }
92948+ }
92949 } while_each_thread(g, p);
92950 read_unlock(&tasklist_lock);
92951
92952@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
92953 todo += wq_busy;
92954 }
92955
92956- if (!todo || time_after(jiffies, end_time))
92957+ if (!todo || timedout)
92958 break;
92959
92960 if (pm_wakeup_pending()) {
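
The try_to_freeze_tasks() change samples the deadline once per scan into a timedout flag, so the per-task diagnostics printed in the final pass agree with the loop's exit decision instead of racing a second jiffies read. The same pattern in userspace, with time(2) standing in for jiffies and a one-second budget as an assumption:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool task_stuck(int i) { return i == 2; }	/* toy predicate */

int main(void)
{
	time_t end_time = time(NULL) + 1;	/* assumed 1s budget */
	int todo;

	while (true) {
		bool timedout = time(NULL) > end_time;	/* sampled once per pass */

		todo = 0;
		for (int i = 0; i < 4; i++) {
			if (!task_stuck(i))
				continue;
			todo++;
			if (timedout)	/* consistent with the break below */
				fprintf(stderr, "task %d refusing to freeze\n", i);
		}
		if (!todo || timedout)
			break;
	}
	return todo ? 1 : 0;
}
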
92961diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
92962index 13e839d..8a71f12 100644
92963--- a/kernel/printk/printk.c
92964+++ b/kernel/printk/printk.c
92965@@ -480,6 +480,11 @@ static int check_syslog_permissions(int type, bool from_file)
92966 if (from_file && type != SYSLOG_ACTION_OPEN)
92967 return 0;
92968
92969+#ifdef CONFIG_GRKERNSEC_DMESG
92970+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
92971+ return -EPERM;
92972+#endif
92973+
92974 if (syslog_action_restricted(type)) {
92975 if (capable(CAP_SYSLOG))
92976 return 0;
92977diff --git a/kernel/profile.c b/kernel/profile.c
92978index 54bf5ba..df6e0a2 100644
92979--- a/kernel/profile.c
92980+++ b/kernel/profile.c
92981@@ -37,7 +37,7 @@ struct profile_hit {
92982 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
92983 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
92984
92985-static atomic_t *prof_buffer;
92986+static atomic_unchecked_t *prof_buffer;
92987 static unsigned long prof_len, prof_shift;
92988
92989 int prof_on __read_mostly;
92990@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
92991 hits[i].pc = 0;
92992 continue;
92993 }
92994- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92995+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92996 hits[i].hits = hits[i].pc = 0;
92997 }
92998 }
92999@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93000 * Add the current hit(s) and flush the write-queue out
93001 * to the global buffer:
93002 */
93003- atomic_add(nr_hits, &prof_buffer[pc]);
93004+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93005 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93006- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93007+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93008 hits[i].pc = hits[i].hits = 0;
93009 }
93010 out:
93011@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93012 {
93013 unsigned long pc;
93014 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93015- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93016+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93017 }
93018 #endif /* !CONFIG_SMP */
93019
93020@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93021 return -EFAULT;
93022 buf++; p++; count--; read++;
93023 }
93024- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93025+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93026 if (copy_to_user(buf, (void *)pnt, count))
93027 return -EFAULT;
93028 read += count;
93029@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93030 }
93031 #endif
93032 profile_discard_flip_buffers();
93033- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93034+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93035 return count;
93036 }
93037
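
Under PaX REFCOUNT, plain atomic_t traps on signed overflow to stop reference-count wraps; the profile.c hunks move the hit buckets to atomic_unchecked_t because a profiling counter that wraps is harmless statistics, not a security event. C11 offers a comparable split between signed atomics (overflow would be a bug) and unsigned ones (wraparound is defined); a sketch:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* refcount-style counter: an overflow here would be a real bug */
static atomic_int refcount = 1;

/* statistics counter: wraparound is expected and harmless */
static atomic_uint hits = UINT_MAX;

int main(void)
{
	atomic_fetch_add(&refcount, 1);
	atomic_fetch_add(&hits, 1);	/* wraps to 0 -- defined for unsigned */

	printf("refcount=%d hits=%u\n",
	       atomic_load(&refcount), atomic_load(&hits));
	return 0;
}
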
93038diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93039index adf9862..9d86345 100644
93040--- a/kernel/ptrace.c
93041+++ b/kernel/ptrace.c
93042@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
93043 if (seize)
93044 flags |= PT_SEIZED;
93045 rcu_read_lock();
93046- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93047+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93048 flags |= PT_PTRACE_CAP;
93049 rcu_read_unlock();
93050 task->ptrace = flags;
93051@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93052 break;
93053 return -EIO;
93054 }
93055- if (copy_to_user(dst, buf, retval))
93056+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
93057 return -EFAULT;
93058 copied += retval;
93059 src += retval;
93060@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
93061 bool seized = child->ptrace & PT_SEIZED;
93062 int ret = -EIO;
93063 siginfo_t siginfo, *si;
93064- void __user *datavp = (void __user *) data;
93065+ void __user *datavp = (__force void __user *) data;
93066 unsigned long __user *datalp = datavp;
93067 unsigned long flags;
93068
93069@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
93070 goto out;
93071 }
93072
93073+ if (gr_handle_ptrace(child, request)) {
93074+ ret = -EPERM;
93075+ goto out_put_task_struct;
93076+ }
93077+
93078 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93079 ret = ptrace_attach(child, request, addr, data);
93080 /*
93081 * Some architectures need to do book-keeping after
93082 * a ptrace attach.
93083 */
93084- if (!ret)
93085+ if (!ret) {
93086 arch_ptrace_attach(child);
93087+ gr_audit_ptrace(child);
93088+ }
93089 goto out_put_task_struct;
93090 }
93091
93092@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
93093 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93094 if (copied != sizeof(tmp))
93095 return -EIO;
93096- return put_user(tmp, (unsigned long __user *)data);
93097+ return put_user(tmp, (__force unsigned long __user *)data);
93098 }
93099
93100 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
93101@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93102 }
93103
93104 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93105- compat_long_t, addr, compat_long_t, data)
93106+ compat_ulong_t, addr, compat_ulong_t, data)
93107 {
93108 struct task_struct *child;
93109 long ret;
93110@@ -1197,14 +1204,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93111 goto out;
93112 }
93113
93114+ if (gr_handle_ptrace(child, request)) {
93115+ ret = -EPERM;
93116+ goto out_put_task_struct;
93117+ }
93118+
93119 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93120 ret = ptrace_attach(child, request, addr, data);
93121 /*
93122 * Some architectures need to do book-keeping after
93123 * a ptrace attach.
93124 */
93125- if (!ret)
93126+ if (!ret) {
93127 arch_ptrace_attach(child);
93128+ gr_audit_ptrace(child);
93129+ }
93130 goto out_put_task_struct;
93131 }
93132
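
The ptrace_readdata() hunk refuses to copy_to_user() more bytes than the on-stack buffer holds, so a bogus length can never over-read the kernel stack. The same defensive check in plain C, with memcpy standing in for copy_to_user() and the 128-byte buffer an arbitrary choice:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_chunk(char *dst, const char *src, size_t retval)
{
	char buf[128];

	/* never trust retval to fit the local buffer */
	if (retval > sizeof(buf))
		return -EFAULT;

	memcpy(buf, src, retval);	/* stand-in for the kernel-side read */
	memcpy(dst, buf, retval);	/* stand-in for copy_to_user() */
	return 0;
}

int main(void)
{
	char out[16];

	printf("in-bounds copy: %d\n", copy_chunk(out, "hello", 6));	/* 0 */
	printf("oversize copy: %d\n", copy_chunk(out, "", 4096));	/* -14 */
	return 0;
}
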
93133diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
93134index 948a769..5ca842b 100644
93135--- a/kernel/rcu/rcutorture.c
93136+++ b/kernel/rcu/rcutorture.c
93137@@ -124,12 +124,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93138 rcu_torture_count) = { 0 };
93139 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93140 rcu_torture_batch) = { 0 };
93141-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93142-static atomic_t n_rcu_torture_alloc;
93143-static atomic_t n_rcu_torture_alloc_fail;
93144-static atomic_t n_rcu_torture_free;
93145-static atomic_t n_rcu_torture_mberror;
93146-static atomic_t n_rcu_torture_error;
93147+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93148+static atomic_unchecked_t n_rcu_torture_alloc;
93149+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93150+static atomic_unchecked_t n_rcu_torture_free;
93151+static atomic_unchecked_t n_rcu_torture_mberror;
93152+static atomic_unchecked_t n_rcu_torture_error;
93153 static long n_rcu_torture_barrier_error;
93154 static long n_rcu_torture_boost_ktrerror;
93155 static long n_rcu_torture_boost_rterror;
93156@@ -200,11 +200,11 @@ rcu_torture_alloc(void)
93157
93158 spin_lock_bh(&rcu_torture_lock);
93159 if (list_empty(&rcu_torture_freelist)) {
93160- atomic_inc(&n_rcu_torture_alloc_fail);
93161+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93162 spin_unlock_bh(&rcu_torture_lock);
93163 return NULL;
93164 }
93165- atomic_inc(&n_rcu_torture_alloc);
93166+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93167 p = rcu_torture_freelist.next;
93168 list_del_init(p);
93169 spin_unlock_bh(&rcu_torture_lock);
93170@@ -217,7 +217,7 @@ rcu_torture_alloc(void)
93171 static void
93172 rcu_torture_free(struct rcu_torture *p)
93173 {
93174- atomic_inc(&n_rcu_torture_free);
93175+ atomic_inc_unchecked(&n_rcu_torture_free);
93176 spin_lock_bh(&rcu_torture_lock);
93177 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93178 spin_unlock_bh(&rcu_torture_lock);
93179@@ -301,7 +301,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
93180 i = rp->rtort_pipe_count;
93181 if (i > RCU_TORTURE_PIPE_LEN)
93182 i = RCU_TORTURE_PIPE_LEN;
93183- atomic_inc(&rcu_torture_wcount[i]);
93184+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93185 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93186 rp->rtort_mbtest = 0;
93187 return true;
93188@@ -808,7 +808,7 @@ rcu_torture_writer(void *arg)
93189 i = old_rp->rtort_pipe_count;
93190 if (i > RCU_TORTURE_PIPE_LEN)
93191 i = RCU_TORTURE_PIPE_LEN;
93192- atomic_inc(&rcu_torture_wcount[i]);
93193+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93194 old_rp->rtort_pipe_count++;
93195 switch (synctype[torture_random(&rand) % nsynctypes]) {
93196 case RTWS_DEF_FREE:
93197@@ -926,7 +926,7 @@ static void rcu_torture_timer(unsigned long unused)
93198 return;
93199 }
93200 if (p->rtort_mbtest == 0)
93201- atomic_inc(&n_rcu_torture_mberror);
93202+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93203 spin_lock(&rand_lock);
93204 cur_ops->read_delay(&rand);
93205 n_rcu_torture_timers++;
93206@@ -996,7 +996,7 @@ rcu_torture_reader(void *arg)
93207 continue;
93208 }
93209 if (p->rtort_mbtest == 0)
93210- atomic_inc(&n_rcu_torture_mberror);
93211+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93212 cur_ops->read_delay(&rand);
93213 preempt_disable();
93214 pipe_count = p->rtort_pipe_count;
93215@@ -1054,15 +1054,15 @@ rcu_torture_printk(char *page)
93216 }
93217 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
93218 page += sprintf(page,
93219- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
93220+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
93221 rcu_torture_current,
93222 rcu_torture_current_version,
93223 list_empty(&rcu_torture_freelist),
93224- atomic_read(&n_rcu_torture_alloc),
93225- atomic_read(&n_rcu_torture_alloc_fail),
93226- atomic_read(&n_rcu_torture_free));
93227+ atomic_read_unchecked(&n_rcu_torture_alloc),
93228+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93229+ atomic_read_unchecked(&n_rcu_torture_free));
93230 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
93231- atomic_read(&n_rcu_torture_mberror),
93232+ atomic_read_unchecked(&n_rcu_torture_mberror),
93233 n_rcu_torture_boost_ktrerror,
93234 n_rcu_torture_boost_rterror);
93235 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
93236@@ -1075,14 +1075,14 @@ rcu_torture_printk(char *page)
93237 n_barrier_attempts,
93238 n_rcu_torture_barrier_error);
93239 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
93240- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
93241+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
93242 n_rcu_torture_barrier_error != 0 ||
93243 n_rcu_torture_boost_ktrerror != 0 ||
93244 n_rcu_torture_boost_rterror != 0 ||
93245 n_rcu_torture_boost_failure != 0 ||
93246 i > 1) {
93247 page += sprintf(page, "!!! ");
93248- atomic_inc(&n_rcu_torture_error);
93249+ atomic_inc_unchecked(&n_rcu_torture_error);
93250 WARN_ON_ONCE(1);
93251 }
93252 page += sprintf(page, "Reader Pipe: ");
93253@@ -1096,7 +1096,7 @@ rcu_torture_printk(char *page)
93254 page += sprintf(page, "Free-Block Circulation: ");
93255 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93256 page += sprintf(page, " %d",
93257- atomic_read(&rcu_torture_wcount[i]));
93258+ atomic_read_unchecked(&rcu_torture_wcount[i]));
93259 }
93260 page += sprintf(page, "\n");
93261 if (cur_ops->stats)
93262@@ -1461,7 +1461,7 @@ rcu_torture_cleanup(void)
93263
93264 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
93265
93266- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93267+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93268 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
93269 else if (torture_onoff_failures())
93270 rcu_torture_print_module_parms(cur_ops,
93271@@ -1584,18 +1584,18 @@ rcu_torture_init(void)
93272
93273 rcu_torture_current = NULL;
93274 rcu_torture_current_version = 0;
93275- atomic_set(&n_rcu_torture_alloc, 0);
93276- atomic_set(&n_rcu_torture_alloc_fail, 0);
93277- atomic_set(&n_rcu_torture_free, 0);
93278- atomic_set(&n_rcu_torture_mberror, 0);
93279- atomic_set(&n_rcu_torture_error, 0);
93280+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93281+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93282+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93283+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93284+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93285 n_rcu_torture_barrier_error = 0;
93286 n_rcu_torture_boost_ktrerror = 0;
93287 n_rcu_torture_boost_rterror = 0;
93288 n_rcu_torture_boost_failure = 0;
93289 n_rcu_torture_boosts = 0;
93290 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93291- atomic_set(&rcu_torture_wcount[i], 0);
93292+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93293 for_each_possible_cpu(cpu) {
93294 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93295 per_cpu(rcu_torture_count, cpu)[i] = 0;
93296diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
93297index c639556..cf0a0d5 100644
93298--- a/kernel/rcu/srcu.c
93299+++ b/kernel/rcu/srcu.c
93300@@ -298,9 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
93301
93302 idx = ACCESS_ONCE(sp->completed) & 0x1;
93303 preempt_disable();
93304- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
93305+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
93306 smp_mb(); /* B */ /* Avoid leaking the critical section. */
93307- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
93308+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
93309 preempt_enable();
93310 return idx;
93311 }
93312diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
93313index d9efcc1..ea543e9 100644
93314--- a/kernel/rcu/tiny.c
93315+++ b/kernel/rcu/tiny.c
93316@@ -42,7 +42,7 @@
93317 /* Forward declarations for tiny_plugin.h. */
93318 struct rcu_ctrlblk;
93319 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
93320-static void rcu_process_callbacks(struct softirq_action *unused);
93321+static void rcu_process_callbacks(void);
93322 static void __call_rcu(struct rcu_head *head,
93323 void (*func)(struct rcu_head *rcu),
93324 struct rcu_ctrlblk *rcp);
93325@@ -308,7 +308,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
93326 false));
93327 }
93328
93329-static void rcu_process_callbacks(struct softirq_action *unused)
93330+static __latent_entropy void rcu_process_callbacks(void)
93331 {
93332 __rcu_process_callbacks(&rcu_sched_ctrlblk);
93333 __rcu_process_callbacks(&rcu_bh_ctrlblk);
93334diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
93335index 858c565..7efd915 100644
93336--- a/kernel/rcu/tiny_plugin.h
93337+++ b/kernel/rcu/tiny_plugin.h
93338@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
93339 dump_stack();
93340 }
93341 if (*rcp->curtail && ULONG_CMP_GE(j, js))
93342- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
93343+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
93344 3 * rcu_jiffies_till_stall_check() + 3;
93345 else if (ULONG_CMP_GE(j, js))
93346- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93347+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93348 }
93349
93350 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
93351 {
93352 rcp->ticks_this_gp = 0;
93353 rcp->gp_start = jiffies;
93354- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93355+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93356 }
93357
93358 static void check_cpu_stalls(void)
93359diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
93360index 625d0b0..0bce4d6 100644
93361--- a/kernel/rcu/tree.c
93362+++ b/kernel/rcu/tree.c
93363@@ -263,7 +263,7 @@ static void rcu_momentary_dyntick_idle(void)
93364 */
93365 rdtp = this_cpu_ptr(&rcu_dynticks);
93366 smp_mb__before_atomic(); /* Earlier stuff before QS. */
93367- atomic_add(2, &rdtp->dynticks); /* QS. */
93368+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
93369 smp_mb__after_atomic(); /* Later stuff after QS. */
93370 break;
93371 }
93372@@ -523,9 +523,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
93373 rcu_prepare_for_idle(smp_processor_id());
93374 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93375 smp_mb__before_atomic(); /* See above. */
93376- atomic_inc(&rdtp->dynticks);
93377+ atomic_inc_unchecked(&rdtp->dynticks);
93378 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
93379- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93380+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93381
93382 /*
93383 * It is illegal to enter an extended quiescent state while
93384@@ -643,10 +643,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
93385 int user)
93386 {
93387 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
93388- atomic_inc(&rdtp->dynticks);
93389+ atomic_inc_unchecked(&rdtp->dynticks);
93390 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
93391 smp_mb__after_atomic(); /* See above. */
93392- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93393+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93394 rcu_cleanup_after_idle(smp_processor_id());
93395 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
93396 if (!user && !is_idle_task(current)) {
93397@@ -767,14 +767,14 @@ void rcu_nmi_enter(void)
93398 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
93399
93400 if (rdtp->dynticks_nmi_nesting == 0 &&
93401- (atomic_read(&rdtp->dynticks) & 0x1))
93402+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
93403 return;
93404 rdtp->dynticks_nmi_nesting++;
93405 smp_mb__before_atomic(); /* Force delay from prior write. */
93406- atomic_inc(&rdtp->dynticks);
93407+ atomic_inc_unchecked(&rdtp->dynticks);
93408 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
93409 smp_mb__after_atomic(); /* See above. */
93410- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93411+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93412 }
93413
93414 /**
93415@@ -793,9 +793,9 @@ void rcu_nmi_exit(void)
93416 return;
93417 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93418 smp_mb__before_atomic(); /* See above. */
93419- atomic_inc(&rdtp->dynticks);
93420+ atomic_inc_unchecked(&rdtp->dynticks);
93421 smp_mb__after_atomic(); /* Force delay to next write. */
93422- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93423+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93424 }
93425
93426 /**
93427@@ -808,7 +808,7 @@ void rcu_nmi_exit(void)
93428 */
93429 bool notrace __rcu_is_watching(void)
93430 {
93431- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
93432+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
93433 }
93434
93435 /**
93436@@ -891,7 +891,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
93437 static int dyntick_save_progress_counter(struct rcu_data *rdp,
93438 bool *isidle, unsigned long *maxj)
93439 {
93440- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
93441+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
93442 rcu_sysidle_check_cpu(rdp, isidle, maxj);
93443 if ((rdp->dynticks_snap & 0x1) == 0) {
93444 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
93445@@ -920,7 +920,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
93446 int *rcrmp;
93447 unsigned int snap;
93448
93449- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
93450+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
93451 snap = (unsigned int)rdp->dynticks_snap;
93452
93453 /*
93454@@ -983,10 +983,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
93455 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
93456 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
93457 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
93458- ACCESS_ONCE(rdp->cond_resched_completed) =
93459+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
93460 ACCESS_ONCE(rdp->mynode->completed);
93461 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
93462- ACCESS_ONCE(*rcrmp) =
93463+ ACCESS_ONCE_RW(*rcrmp) =
93464 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
93465 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
93466 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
93467@@ -1008,7 +1008,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
93468 rsp->gp_start = j;
93469 smp_wmb(); /* Record start time before stall time. */
93470 j1 = rcu_jiffies_till_stall_check();
93471- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
93472+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
93473 rsp->jiffies_resched = j + j1 / 2;
93474 }
93475
93476@@ -1052,7 +1052,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
93477 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93478 return;
93479 }
93480- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
93481+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
93482 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93483
93484 /*
93485@@ -1130,7 +1130,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
93486
93487 raw_spin_lock_irqsave(&rnp->lock, flags);
93488 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
93489- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
93490+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
93491 3 * rcu_jiffies_till_stall_check() + 3;
93492 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93493
93494@@ -1214,7 +1214,7 @@ void rcu_cpu_stall_reset(void)
93495 struct rcu_state *rsp;
93496
93497 for_each_rcu_flavor(rsp)
93498- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
93499+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
93500 }
93501
93502 /*
93503@@ -1594,7 +1594,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
93504 raw_spin_unlock_irq(&rnp->lock);
93505 return 0;
93506 }
93507- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
93508+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
93509
93510 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
93511 /*
93512@@ -1635,9 +1635,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
93513 rdp = this_cpu_ptr(rsp->rda);
93514 rcu_preempt_check_blocked_tasks(rnp);
93515 rnp->qsmask = rnp->qsmaskinit;
93516- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
93517+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
93518 WARN_ON_ONCE(rnp->completed != rsp->completed);
93519- ACCESS_ONCE(rnp->completed) = rsp->completed;
93520+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
93521 if (rnp == rdp->mynode)
93522 (void)__note_gp_changes(rsp, rnp, rdp);
93523 rcu_preempt_boost_start_gp(rnp);
93524@@ -1687,7 +1687,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
93525 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
93526 raw_spin_lock_irq(&rnp->lock);
93527 smp_mb__after_unlock_lock();
93528- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
93529+ ACCESS_ONCE_RW(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
93530 raw_spin_unlock_irq(&rnp->lock);
93531 }
93532 return fqs_state;
93533@@ -1732,7 +1732,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93534 rcu_for_each_node_breadth_first(rsp, rnp) {
93535 raw_spin_lock_irq(&rnp->lock);
93536 smp_mb__after_unlock_lock();
93537- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
93538+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
93539 rdp = this_cpu_ptr(rsp->rda);
93540 if (rnp == rdp->mynode)
93541 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
93542@@ -1747,14 +1747,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93543 rcu_nocb_gp_set(rnp, nocb);
93544
93545 /* Declare grace period done. */
93546- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
93547+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
93548 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
93549 rsp->fqs_state = RCU_GP_IDLE;
93550 rdp = this_cpu_ptr(rsp->rda);
93551 /* Advance CBs to reduce false positives below. */
93552 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
93553 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
93554- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93555+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93556 trace_rcu_grace_period(rsp->name,
93557 ACCESS_ONCE(rsp->gpnum),
93558 TPS("newreq"));
93559@@ -1879,7 +1879,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
93560 */
93561 return false;
93562 }
93563- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93564+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93565 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
93566 TPS("newreq"));
93567
93568@@ -2100,7 +2100,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
93569 rsp->qlen += rdp->qlen;
93570 rdp->n_cbs_orphaned += rdp->qlen;
93571 rdp->qlen_lazy = 0;
93572- ACCESS_ONCE(rdp->qlen) = 0;
93573+ ACCESS_ONCE_RW(rdp->qlen) = 0;
93574 }
93575
93576 /*
93577@@ -2347,7 +2347,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
93578 }
93579 smp_mb(); /* List handling before counting for rcu_barrier(). */
93580 rdp->qlen_lazy -= count_lazy;
93581- ACCESS_ONCE(rdp->qlen) -= count;
93582+ ACCESS_ONCE_RW(rdp->qlen) -= count;
93583 rdp->n_cbs_invoked += count;
93584
93585 /* Reinstate batch limit if we have worked down the excess. */
93586@@ -2492,7 +2492,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
93587 if (rnp_old != NULL)
93588 raw_spin_unlock(&rnp_old->fqslock);
93589 if (ret) {
93590- ACCESS_ONCE(rsp->n_force_qs_lh)++;
93591+ ACCESS_ONCE_RW(rsp->n_force_qs_lh)++;
93592 return;
93593 }
93594 rnp_old = rnp;
93595@@ -2504,11 +2504,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
93596 smp_mb__after_unlock_lock();
93597 raw_spin_unlock(&rnp_old->fqslock);
93598 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
93599- ACCESS_ONCE(rsp->n_force_qs_lh)++;
93600+ ACCESS_ONCE_RW(rsp->n_force_qs_lh)++;
93601 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
93602 return; /* Someone beat us to it. */
93603 }
93604- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
93605+ ACCESS_ONCE_RW(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
93606 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
93607 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
93608 }
93609@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
93610 /*
93611 * Do RCU core processing for the current CPU.
93612 */
93613-static void rcu_process_callbacks(struct softirq_action *unused)
93614+static void rcu_process_callbacks(void)
93615 {
93616 struct rcu_state *rsp;
93617
93618@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
93619 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
93620 if (debug_rcu_head_queue(head)) {
93621 /* Probable double call_rcu(), so leak the callback. */
93622- ACCESS_ONCE(head->func) = rcu_leak_callback;
93623+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
93624 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
93625 return;
93626 }
93627@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
93628 local_irq_restore(flags);
93629 return;
93630 }
93631- ACCESS_ONCE(rdp->qlen)++;
93632+ ACCESS_ONCE_RW(rdp->qlen)++;
93633 if (lazy)
93634 rdp->qlen_lazy++;
93635 else
93636@@ -2968,11 +2968,11 @@ void synchronize_sched_expedited(void)
93637 * counter wrap on a 32-bit system. Quite a few more CPUs would of
93638 * course be required on a 64-bit system.
93639 */
93640- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
93641+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
93642 (ulong)atomic_long_read(&rsp->expedited_done) +
93643 ULONG_MAX / 8)) {
93644 synchronize_sched();
93645- atomic_long_inc(&rsp->expedited_wrap);
93646+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
93647 return;
93648 }
93649
93650@@ -2980,7 +2980,7 @@ void synchronize_sched_expedited(void)
93651 * Take a ticket. Note that atomic_inc_return() implies a
93652 * full memory barrier.
93653 */
93654- snap = atomic_long_inc_return(&rsp->expedited_start);
93655+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
93656 firstsnap = snap;
93657 get_online_cpus();
93658 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
93659@@ -2993,14 +2993,14 @@ void synchronize_sched_expedited(void)
93660 synchronize_sched_expedited_cpu_stop,
93661 NULL) == -EAGAIN) {
93662 put_online_cpus();
93663- atomic_long_inc(&rsp->expedited_tryfail);
93664+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
93665
93666 /* Check to see if someone else did our work for us. */
93667 s = atomic_long_read(&rsp->expedited_done);
93668 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93669 /* ensure test happens before caller kfree */
93670 smp_mb__before_atomic(); /* ^^^ */
93671- atomic_long_inc(&rsp->expedited_workdone1);
93672+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
93673 return;
93674 }
93675
93676@@ -3009,7 +3009,7 @@ void synchronize_sched_expedited(void)
93677 udelay(trycount * num_online_cpus());
93678 } else {
93679 wait_rcu_gp(call_rcu_sched);
93680- atomic_long_inc(&rsp->expedited_normal);
93681+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93682 return;
93683 }
93684
93685@@ -3018,7 +3018,7 @@ void synchronize_sched_expedited(void)
93686 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93687 /* ensure test happens before caller kfree */
93688 smp_mb__before_atomic(); /* ^^^ */
93689- atomic_long_inc(&rsp->expedited_workdone2);
93690+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
93691 return;
93692 }
93693
93694@@ -3030,10 +3030,10 @@ void synchronize_sched_expedited(void)
93695 * period works for us.
93696 */
93697 get_online_cpus();
93698- snap = atomic_long_read(&rsp->expedited_start);
93699+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
93700 smp_mb(); /* ensure read is before try_stop_cpus(). */
93701 }
93702- atomic_long_inc(&rsp->expedited_stoppedcpus);
93703+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
93704
93705 /*
93706 * Everyone up to our most recent fetch is covered by our grace
93707@@ -3042,16 +3042,16 @@ void synchronize_sched_expedited(void)
93708 * than we did already did their update.
93709 */
93710 do {
93711- atomic_long_inc(&rsp->expedited_done_tries);
93712+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
93713 s = atomic_long_read(&rsp->expedited_done);
93714 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
93715 /* ensure test happens before caller kfree */
93716 smp_mb__before_atomic(); /* ^^^ */
93717- atomic_long_inc(&rsp->expedited_done_lost);
93718+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
93719 break;
93720 }
93721 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
93722- atomic_long_inc(&rsp->expedited_done_exit);
93723+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
93724
93725 put_online_cpus();
93726 }
93727@@ -3257,7 +3257,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93728 * ACCESS_ONCE() to prevent the compiler from speculating
93729 * the increment to precede the early-exit check.
93730 */
93731- ACCESS_ONCE(rsp->n_barrier_done)++;
93732+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
93733 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
93734 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
93735 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
93736@@ -3307,7 +3307,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93737
93738 /* Increment ->n_barrier_done to prevent duplicate work. */
93739 smp_mb(); /* Keep increment after above mechanism. */
93740- ACCESS_ONCE(rsp->n_barrier_done)++;
93741+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
93742 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
93743 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
93744 smp_mb(); /* Keep increment before caller's subsequent code. */
93745@@ -3352,10 +3352,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
93746 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
93747 init_callback_list(rdp);
93748 rdp->qlen_lazy = 0;
93749- ACCESS_ONCE(rdp->qlen) = 0;
93750+ ACCESS_ONCE_RW(rdp->qlen) = 0;
93751 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
93752 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
93753- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
93754+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
93755 rdp->cpu = cpu;
93756 rdp->rsp = rsp;
93757 rcu_boot_init_nocb_percpu_data(rdp);
93758@@ -3388,8 +3388,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
93759 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
93760 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
93761 rcu_sysidle_init_percpu_data(rdp->dynticks);
93762- atomic_set(&rdp->dynticks->dynticks,
93763- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
93764+ atomic_set_unchecked(&rdp->dynticks->dynticks,
93765+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
93766 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
93767
93768 /* Add CPU to rcu_node bitmasks. */
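
The atomic_inc()/atomic_read() -> *_unchecked() swaps above cover the ->dynticks counters and the expedited-GP tickets. Under PaX REFCOUNT, ordinary atomic_t arithmetic traps on signed overflow to stop reference-count-overflow exploits; counters whose wraparound is harmless and expected (the even/odd dynticks parity counter, free-running tickets) must opt out via atomic_unchecked_t. A user-space model of the distinction, assuming GCC's __atomic builtins and __builtin_add_overflow (the kernel's check lives in per-architecture assembly):

/* sketch: PaX REFCOUNT in miniature -- a checked counter that refuses to
 * wrap vs. an unchecked one that is allowed to. User-space model only. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)             /* checked: trap on overflow */
{
        int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
        int new;

        do {
                if (__builtin_add_overflow(old, 1, &new)) {
                        fprintf(stderr, "refcount overflow detected\n");
                        abort();        /* PaX reports and saturates instead */
                }
        } while (!__atomic_compare_exchange_n(&v->counter, &old, new, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

static void atomic_inc_unchecked(atomic_unchecked_t *v) /* wrap by design */
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
        atomic_unchecked_t dynticks = { 0x7fffffff };
        atomic_t ref = { 0x7fffffff };

        atomic_inc_unchecked(&dynticks);        /* wraps to INT_MIN, fine */
        printf("dynticks after wrap: %d\n", dynticks.counter);

        puts("incrementing checked counter at INT_MAX:");
        atomic_inc(&ref);                       /* aborts rather than wrap */
        return 0;
}

The dynticks counter in particular encodes state in its parity (even = idle, odd = non-idle) and is meant to count forever, so the checked variant would be a false positive there.
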
93769diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
93770index 0f69a79..c85c2dc 100644
93771--- a/kernel/rcu/tree.h
93772+++ b/kernel/rcu/tree.h
93773@@ -87,11 +87,11 @@ struct rcu_dynticks {
93774 long long dynticks_nesting; /* Track irq/process nesting level. */
93775 /* Process level is worth LLONG_MAX/2. */
93776 int dynticks_nmi_nesting; /* Track NMI nesting level. */
93777- atomic_t dynticks; /* Even value for idle, else odd. */
93778+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
93779 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
93780 long long dynticks_idle_nesting;
93781 /* irq/process nesting level from idle. */
93782- atomic_t dynticks_idle; /* Even value for idle, else odd. */
93783+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
93784 /* "Idle" excludes userspace execution. */
93785 unsigned long dynticks_idle_jiffies;
93786 /* End of last non-NMI non-idle period. */
93787@@ -435,17 +435,17 @@ struct rcu_state {
93788 /* _rcu_barrier(). */
93789 /* End of fields guarded by barrier_mutex. */
93790
93791- atomic_long_t expedited_start; /* Starting ticket. */
93792- atomic_long_t expedited_done; /* Done ticket. */
93793- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
93794- atomic_long_t expedited_tryfail; /* # acquisition failures. */
93795- atomic_long_t expedited_workdone1; /* # done by others #1. */
93796- atomic_long_t expedited_workdone2; /* # done by others #2. */
93797- atomic_long_t expedited_normal; /* # fallbacks to normal. */
93798- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
93799- atomic_long_t expedited_done_tries; /* # tries to update _done. */
93800- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
93801- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
93802+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
93803+ atomic_long_t expedited_done; /* Done ticket. */
93804+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
93805+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
93806+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
93807+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
93808+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
93809+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
93810+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
93811+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
93812+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
93813
93814 unsigned long jiffies_force_qs; /* Time at which to invoke */
93815 /* force_quiescent_state(). */
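
The expedited_* statistics above become atomic_long_unchecked_t because they are free-running tickets and event counts that may legitimately wrap; only expedited_done keeps the checked type, plausibly because its updates go through atomic_long_cmpxchg() rather than an instrumented increment. Wrap is safe for these tickets because every comparison on them is modular. ULONG_CMP_GE() below is the real macro from include/linux/rcupdate.h; the scenario around it is illustrative:

/* sketch: modular ticket comparison as used by the expedited-GP counters */
#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
        unsigned long done = ULONG_MAX;         /* ticket about to wrap */
        unsigned long snap = done + 3;          /* wrapped around: now 2 */

        /* a plain >= believes the wrapped ticket is older */
        printf("plain   done >= snap: %d\n", done >= snap);
        /* the modular compare knows snap is really newer */
        printf("modular done >= snap: %d\n", (int)ULONG_CMP_GE(done, snap));
        printf("modular snap >= done: %d\n", (int)ULONG_CMP_GE(snap, done));
        return 0;
}
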
93816diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
93817index 02ac0fb..4aa4a36 100644
93818--- a/kernel/rcu/tree_plugin.h
93819+++ b/kernel/rcu/tree_plugin.h
93820@@ -735,7 +735,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
93821 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
93822 {
93823 return !rcu_preempted_readers_exp(rnp) &&
93824- ACCESS_ONCE(rnp->expmask) == 0;
93825+ ACCESS_ONCE_RW(rnp->expmask) == 0;
93826 }
93827
93828 /*
93829@@ -897,7 +897,7 @@ void synchronize_rcu_expedited(void)
93830
93831 /* Clean up and exit. */
93832 smp_mb(); /* ensure expedited GP seen before counter increment. */
93833- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
93834+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
93835 unlock_mb_ret:
93836 mutex_unlock(&sync_rcu_preempt_exp_mutex);
93837 mb_ret:
93838@@ -1447,7 +1447,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
93839 free_cpumask_var(cm);
93840 }
93841
93842-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
93843+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
93844 .store = &rcu_cpu_kthread_task,
93845 .thread_should_run = rcu_cpu_kthread_should_run,
93846 .thread_fn = rcu_cpu_kthread,
93847@@ -1926,7 +1926,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
93848 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
93849 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
93850 cpu, ticks_value, ticks_title,
93851- atomic_read(&rdtp->dynticks) & 0xfff,
93852+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
93853 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
93854 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
93855 fast_no_hz);
93856@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
93857
93858 /* Enqueue the callback on the nocb list and update counts. */
93859 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
93860- ACCESS_ONCE(*old_rhpp) = rhp;
93861+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
93862 atomic_long_add(rhcount, &rdp->nocb_q_count);
93863 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
93864
93865@@ -2255,12 +2255,12 @@ static int rcu_nocb_kthread(void *arg)
93866 * Extract queued callbacks, update counts, and wait
93867 * for a grace period to elapse.
93868 */
93869- ACCESS_ONCE(rdp->nocb_head) = NULL;
93870+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
93871 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
93872 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
93873 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
93874- ACCESS_ONCE(rdp->nocb_p_count) += c;
93875- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
93876+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
93877+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
93878 rcu_nocb_wait_gp(rdp);
93879
93880 /* Each pass through the following loop invokes a callback. */
93881@@ -2286,8 +2286,8 @@ static int rcu_nocb_kthread(void *arg)
93882 list = next;
93883 }
93884 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
93885- ACCESS_ONCE(rdp->nocb_p_count) -= c;
93886- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
93887+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
93888+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
93889 rdp->n_nocbs_invoked += c;
93890 }
93891 return 0;
93892@@ -2304,7 +2304,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
93893 {
93894 if (!rcu_nocb_need_deferred_wakeup(rdp))
93895 return;
93896- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
93897+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
93898 wake_up(&rdp->nocb_wq);
93899 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
93900 }
93901@@ -2330,7 +2330,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
93902 t = kthread_run(rcu_nocb_kthread, rdp,
93903 "rcuo%c/%d", rsp->abbr, cpu);
93904 BUG_ON(IS_ERR(t));
93905- ACCESS_ONCE(rdp->nocb_kthread) = t;
93906+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
93907 }
93908 }
93909
93910@@ -2461,11 +2461,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
93911
93912 /* Record start of fully idle period. */
93913 j = jiffies;
93914- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
93915+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
93916 smp_mb__before_atomic();
93917- atomic_inc(&rdtp->dynticks_idle);
93918+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93919 smp_mb__after_atomic();
93920- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
93921+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
93922 }
93923
93924 /*
93925@@ -2530,9 +2530,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
93926
93927 /* Record end of idle period. */
93928 smp_mb__before_atomic();
93929- atomic_inc(&rdtp->dynticks_idle);
93930+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93931 smp_mb__after_atomic();
93932- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
93933+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
93934
93935 /*
93936 * If we are the timekeeping CPU, we are permitted to be non-idle
93937@@ -2573,7 +2573,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
93938 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
93939
93940 /* Pick up current idle and NMI-nesting counter and check. */
93941- cur = atomic_read(&rdtp->dynticks_idle);
93942+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
93943 if (cur & 0x1) {
93944 *isidle = false; /* We are not idle! */
93945 return;
93946@@ -2622,7 +2622,7 @@ static void rcu_sysidle(unsigned long j)
93947 case RCU_SYSIDLE_NOT:
93948
93949 /* First time all are idle, so note a short idle period. */
93950- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93951+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93952 break;
93953
93954 case RCU_SYSIDLE_SHORT:
93955@@ -2660,7 +2660,7 @@ static void rcu_sysidle_cancel(void)
93956 {
93957 smp_mb();
93958 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
93959- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
93960+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
93961 }
93962
93963 /*
93964@@ -2708,7 +2708,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
93965 smp_mb(); /* grace period precedes setting inuse. */
93966
93967 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
93968- ACCESS_ONCE(rshp->inuse) = 0;
93969+ ACCESS_ONCE_RW(rshp->inuse) = 0;
93970 }
93971
93972 /*
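
Two patterns recur in the tree_plugin.h hunks: the dynticks-idle counters go _unchecked (the same REFCOUNT opt-out as above), and rcu_cpu_thread_spec gains __read_only, moving the hotplug-thread descriptor into a section that is write-protected after boot. Rare legitimate updates to such objects must be bracketed by pax_open_kernel()/pax_close_kernel(). A user-space analogue using mprotect() -- the kernel flips its write-protection mechanism instead, 4 KiB pages are assumed, and all names here are illustrative:

/* sketch: __read_only data with an open/close window for updates */
#include <stdio.h>
#include <sys/mman.h>

#define PAGE_SZ 4096    /* assumption: 4 KiB pages */

/* stand-in for __read_only: give the object a whole page of its own */
static int sealed[PAGE_SZ / sizeof(int)]
        __attribute__((aligned(PAGE_SZ))) = { 42 };

static void set_prot(int prot)
{
        if (mprotect(sealed, sizeof(sealed), prot))
                perror("mprotect");
}

int main(void)
{
        set_prot(PROT_READ);                    /* seal, like .data..read_only */
        /* sealed[0] = 0; here would fault */
        set_prot(PROT_READ | PROT_WRITE);       /* pax_open_kernel() analogue */
        sealed[0] = 7;                          /* rare legitimate update */
        set_prot(PROT_READ);                    /* pax_close_kernel() analogue */
        printf("%d\n", sealed[0]);
        return 0;
}

The payoff is that an attacker with a kernel write primitive cannot silently retarget the thread_fn pointer inside the sealed descriptor.
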
93973diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
93974index 5cdc62e..cc52e88 100644
93975--- a/kernel/rcu/tree_trace.c
93976+++ b/kernel/rcu/tree_trace.c
93977@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
93978 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
93979 rdp->passed_quiesce, rdp->qs_pending);
93980 seq_printf(m, " dt=%d/%llx/%d df=%lu",
93981- atomic_read(&rdp->dynticks->dynticks),
93982+ atomic_read_unchecked(&rdp->dynticks->dynticks),
93983 rdp->dynticks->dynticks_nesting,
93984 rdp->dynticks->dynticks_nmi_nesting,
93985 rdp->dynticks_fqs);
93986@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
93987 struct rcu_state *rsp = (struct rcu_state *)m->private;
93988
93989 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
93990- atomic_long_read(&rsp->expedited_start),
93991+ atomic_long_read_unchecked(&rsp->expedited_start),
93992 atomic_long_read(&rsp->expedited_done),
93993- atomic_long_read(&rsp->expedited_wrap),
93994- atomic_long_read(&rsp->expedited_tryfail),
93995- atomic_long_read(&rsp->expedited_workdone1),
93996- atomic_long_read(&rsp->expedited_workdone2),
93997- atomic_long_read(&rsp->expedited_normal),
93998- atomic_long_read(&rsp->expedited_stoppedcpus),
93999- atomic_long_read(&rsp->expedited_done_tries),
94000- atomic_long_read(&rsp->expedited_done_lost),
94001- atomic_long_read(&rsp->expedited_done_exit));
94002+ atomic_long_read_unchecked(&rsp->expedited_wrap),
94003+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
94004+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
94005+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
94006+ atomic_long_read_unchecked(&rsp->expedited_normal),
94007+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
94008+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
94009+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
94010+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
94011 return 0;
94012 }
94013
94014diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
94015index bc78835..7691a45 100644
94016--- a/kernel/rcu/update.c
94017+++ b/kernel/rcu/update.c
94018@@ -311,10 +311,10 @@ int rcu_jiffies_till_stall_check(void)
94019 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
94020 */
94021 if (till_stall_check < 3) {
94022- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
94023+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
94024 till_stall_check = 3;
94025 } else if (till_stall_check > 300) {
94026- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
94027+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
94028 till_stall_check = 300;
94029 }
94030 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
94031diff --git a/kernel/resource.c b/kernel/resource.c
94032index 3c2237a..4568d96 100644
94033--- a/kernel/resource.c
94034+++ b/kernel/resource.c
94035@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
94036
94037 static int __init ioresources_init(void)
94038 {
94039+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94040+#ifdef CONFIG_GRKERNSEC_PROC_USER
94041+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
94042+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
94043+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
94044+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
94045+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
94046+#endif
94047+#else
94048 proc_create("ioports", 0, NULL, &proc_ioports_operations);
94049 proc_create("iomem", 0, NULL, &proc_iomem_operations);
94050+#endif
94051 return 0;
94052 }
94053 __initcall(ioresources_init);
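
The ioresources_init() hunk restricts /proc/ioports and /proc/iomem, which otherwise leak the physical-memory layout that exploits like to know. A compile-time model of the mode selection -- the CONFIG_* names are the real Kconfig symbols, the function is a stand-in for the proc_create() calls, and the note that the proc core expands a mode of 0 to world-readable 0444 reflects fs/proc/generic.c of this era:

/* sketch: permission selection for /proc/{ioports,iomem} */
#include <stdio.h>
#include <sys/stat.h>

#define CONFIG_GRKERNSEC_PROC_ADD
#define CONFIG_GRKERNSEC_PROC_USERGROUP

/* returns the proc entry mode, or -1 if the entry is not created at all */
static int ioresources_mode(void)
{
#ifdef CONFIG_GRKERNSEC_PROC_ADD
# if defined(CONFIG_GRKERNSEC_PROC_USER)
        return S_IRUSR;                 /* 0400: root only */
# elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
        return S_IRUSR | S_IRGRP;       /* 0440: root + configured group */
# else
        return -1;                      /* neither: no proc_create() call */
# endif
#else
        return 0;       /* mainline: proc core turns mode 0 into 0444 */
#endif
}

int main(void)
{
        int mode = ioresources_mode();

        if (mode < 0)
                puts("/proc/ioports and /proc/iomem not created");
        else
                printf("/proc/{ioports,iomem} mode: %04o\n", (unsigned)mode);
        return 0;
}
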
94054diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
94055index e73efba..c9bfbd4 100644
94056--- a/kernel/sched/auto_group.c
94057+++ b/kernel/sched/auto_group.c
94058@@ -11,7 +11,7 @@
94059
94060 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
94061 static struct autogroup autogroup_default;
94062-static atomic_t autogroup_seq_nr;
94063+static atomic_unchecked_t autogroup_seq_nr;
94064
94065 void __init autogroup_init(struct task_struct *init_task)
94066 {
94067@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
94068
94069 kref_init(&ag->kref);
94070 init_rwsem(&ag->lock);
94071- ag->id = atomic_inc_return(&autogroup_seq_nr);
94072+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
94073 ag->tg = tg;
94074 #ifdef CONFIG_RT_GROUP_SCHED
94075 /*
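
autogroup_seq_nr only labels autogroups with a unique id, so wraparound after 2^32 allocations is harmless and the counter opts out of overflow checking. A C11 model of the id allocation (unsigned here so the wrap is well-defined in portable C; the kernel's atomic_t holds an int whose wrap the _unchecked ops simply permit):

/* sketch: wrap-tolerant id allocation, the autogroup_seq_nr pattern */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint autogroup_seq_nr;    /* kernel: atomic_unchecked_t */

static unsigned int autogroup_next_id(void)
{
        /* atomic_inc_return_unchecked(): post-increment value, wrap allowed */
        return atomic_fetch_add_explicit(&autogroup_seq_nr, 1,
                                         memory_order_relaxed) + 1;
}

int main(void)
{
        printf("ag->id = %u\n", autogroup_next_id());
        printf("ag->id = %u\n", autogroup_next_id());
        return 0;
}
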
94076diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
94077index a63f4dc..349bbb0 100644
94078--- a/kernel/sched/completion.c
94079+++ b/kernel/sched/completion.c
94080@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
94081 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94082 * or number of jiffies left till timeout) if completed.
94083 */
94084-long __sched
94085+long __sched __intentional_overflow(-1)
94086 wait_for_completion_interruptible_timeout(struct completion *x,
94087 unsigned long timeout)
94088 {
94089@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
94090 *
94091 * Return: -ERESTARTSYS if interrupted, 0 if completed.
94092 */
94093-int __sched wait_for_completion_killable(struct completion *x)
94094+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
94095 {
94096 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
94097 if (t == -ERESTARTSYS)
94098@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
94099 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94100 * or number of jiffies left till timeout) if completed.
94101 */
94102-long __sched
94103+long __sched __intentional_overflow(-1)
94104 wait_for_completion_killable_timeout(struct completion *x,
94105 unsigned long timeout)
94106 {
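
The __intentional_overflow(-1) annotations feed the size_overflow GCC plugin: these wait_for_completion_*() variants return either a remaining-jiffies count or a negative errno in the same signed long, and the annotation whitelists the negative range so the plugin's overflow instrumentation does not flag it. A sketch of how such a macro degrades when the plugin is absent -- the SIZE_OVERFLOW_PLUGIN guard and the attribute spelling are assumptions here, since only the plugin itself consumes the attribute:

/* sketch: a plugin-consumed annotation that must compile away otherwise */
#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)    /* plugin absent: no-op */
#endif

/* returns jiffies left (>= 0) on success or -ERESTARTSYS; the negative
 * return is exactly the "overflow" the (-1) argument whitelists */
static long __intentional_overflow(-1) wait_sketch(long timeout, int interrupted)
{
        return interrupted ? -512 /* -ERESTARTSYS */ : timeout;
}

int main(void)
{
        printf("%ld %ld\n", wait_sketch(100, 0), wait_sketch(100, 1));
        return 0;
}
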
94107diff --git a/kernel/sched/core.c b/kernel/sched/core.c
94108index bc1638b..a7638fc 100644
94109--- a/kernel/sched/core.c
94110+++ b/kernel/sched/core.c
94111@@ -1849,7 +1849,7 @@ void set_numabalancing_state(bool enabled)
94112 int sysctl_numa_balancing(struct ctl_table *table, int write,
94113 void __user *buffer, size_t *lenp, loff_t *ppos)
94114 {
94115- struct ctl_table t;
94116+ ctl_table_no_const t;
94117 int err;
94118 int state = numabalancing_enabled;
94119
94120@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
94121 next->active_mm = oldmm;
94122 atomic_inc(&oldmm->mm_count);
94123 enter_lazy_tlb(oldmm, next);
94124- } else
94125+ } else {
94126 switch_mm(oldmm, mm, next);
94127+ populate_stack();
94128+ }
94129
94130 if (!prev->mm) {
94131 prev->active_mm = NULL;
94132@@ -3081,6 +3083,8 @@ int can_nice(const struct task_struct *p, const int nice)
94133 /* convert nice value [19,-20] to rlimit style value [1,40] */
94134 int nice_rlim = nice_to_rlimit(nice);
94135
94136+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94137+
94138 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
94139 capable(CAP_SYS_NICE));
94140 }
94141@@ -3107,7 +3111,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94142 nice = task_nice(current) + increment;
94143
94144 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
94145- if (increment < 0 && !can_nice(current, nice))
94146+ if (increment < 0 && (!can_nice(current, nice) ||
94147+ gr_handle_chroot_nice()))
94148 return -EPERM;
94149
94150 retval = security_task_setnice(current, nice);
94151@@ -3380,6 +3385,7 @@ recheck:
94152 if (policy != p->policy && !rlim_rtprio)
94153 return -EPERM;
94154
94155+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
94156 /* can't increase priority */
94157 if (attr->sched_priority > p->rt_priority &&
94158 attr->sched_priority > rlim_rtprio)
94159@@ -4771,6 +4777,7 @@ void idle_task_exit(void)
94160
94161 if (mm != &init_mm) {
94162 switch_mm(mm, &init_mm, current);
94163+ populate_stack();
94164 finish_arch_post_lock_switch();
94165 }
94166 mmdrop(mm);
94167@@ -4866,7 +4873,7 @@ static void migrate_tasks(unsigned int dead_cpu)
94168
94169 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
94170
94171-static struct ctl_table sd_ctl_dir[] = {
94172+static ctl_table_no_const sd_ctl_dir[] __read_only = {
94173 {
94174 .procname = "sched_domain",
94175 .mode = 0555,
94176@@ -4883,17 +4890,17 @@ static struct ctl_table sd_ctl_root[] = {
94177 {}
94178 };
94179
94180-static struct ctl_table *sd_alloc_ctl_entry(int n)
94181+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
94182 {
94183- struct ctl_table *entry =
94184+ ctl_table_no_const *entry =
94185 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
94186
94187 return entry;
94188 }
94189
94190-static void sd_free_ctl_entry(struct ctl_table **tablep)
94191+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
94192 {
94193- struct ctl_table *entry;
94194+ ctl_table_no_const *entry;
94195
94196 /*
94197 * In the intermediate directories, both the child directory and
94198@@ -4901,22 +4908,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
94199 * will always be set. In the lowest directory the names are
94200 * static strings and all have proc handlers.
94201 */
94202- for (entry = *tablep; entry->mode; entry++) {
94203- if (entry->child)
94204- sd_free_ctl_entry(&entry->child);
94205+ for (entry = tablep; entry->mode; entry++) {
94206+ if (entry->child) {
94207+ sd_free_ctl_entry(entry->child);
94208+ pax_open_kernel();
94209+ entry->child = NULL;
94210+ pax_close_kernel();
94211+ }
94212 if (entry->proc_handler == NULL)
94213 kfree(entry->procname);
94214 }
94215
94216- kfree(*tablep);
94217- *tablep = NULL;
94218+ kfree(tablep);
94219 }
94220
94221 static int min_load_idx = 0;
94222 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
94223
94224 static void
94225-set_table_entry(struct ctl_table *entry,
94226+set_table_entry(ctl_table_no_const *entry,
94227 const char *procname, void *data, int maxlen,
94228 umode_t mode, proc_handler *proc_handler,
94229 bool load_idx)
94230@@ -4936,7 +4946,7 @@ set_table_entry(struct ctl_table *entry,
94231 static struct ctl_table *
94232 sd_alloc_ctl_domain_table(struct sched_domain *sd)
94233 {
94234- struct ctl_table *table = sd_alloc_ctl_entry(14);
94235+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
94236
94237 if (table == NULL)
94238 return NULL;
94239@@ -4974,9 +4984,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
94240 return table;
94241 }
94242
94243-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
94244+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
94245 {
94246- struct ctl_table *entry, *table;
94247+ ctl_table_no_const *entry, *table;
94248 struct sched_domain *sd;
94249 int domain_num = 0, i;
94250 char buf[32];
94251@@ -5003,11 +5013,13 @@ static struct ctl_table_header *sd_sysctl_header;
94252 static void register_sched_domain_sysctl(void)
94253 {
94254 int i, cpu_num = num_possible_cpus();
94255- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
94256+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
94257 char buf[32];
94258
94259 WARN_ON(sd_ctl_dir[0].child);
94260+ pax_open_kernel();
94261 sd_ctl_dir[0].child = entry;
94262+ pax_close_kernel();
94263
94264 if (entry == NULL)
94265 return;
94266@@ -5030,8 +5042,12 @@ static void unregister_sched_domain_sysctl(void)
94267 if (sd_sysctl_header)
94268 unregister_sysctl_table(sd_sysctl_header);
94269 sd_sysctl_header = NULL;
94270- if (sd_ctl_dir[0].child)
94271- sd_free_ctl_entry(&sd_ctl_dir[0].child);
94272+ if (sd_ctl_dir[0].child) {
94273+ sd_free_ctl_entry(sd_ctl_dir[0].child);
94274+ pax_open_kernel();
94275+ sd_ctl_dir[0].child = NULL;
94276+ pax_close_kernel();
94277+ }
94278 }
94279 #else
94280 static void register_sched_domain_sysctl(void)
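
The sched/core.c changes are mostly fallout from the constify plugin: struct ctl_table instances become const, so dynamically assembled sysctl tables switch to the writable ctl_table_no_const alias, sd_free_ctl_entry() takes the table directly instead of a pointer-to-pointer it can no longer null through, and the stores into the constified sd_ctl_dir[0].child go through the pax_open_kernel() pair. (The added populate_stack() after switch_mm() is separate PaX machinery tied to its per-CPU page-table mode.) A plain-const model of the alias pattern -- __no_const and the plugin are grsecurity machinery, so ordinary const stands in for them here:

/* sketch: sealed static tables vs. the writable no_const alias */
#include <stdio.h>
#include <stdlib.h>

struct ctl_table {
        const char *procname;
        unsigned int mode;
        struct ctl_table *child;
};

/* plugin view: ctl_table instances are const unless built via the alias */
typedef struct ctl_table ctl_table_no_const;

static const struct ctl_table sd_ctl_root = {   /* sealed at build time */
        .procname = "kernel", .mode = 0555,
};

static ctl_table_no_const *sd_alloc_ctl_entry(int n)
{
        return calloc(n, sizeof(struct ctl_table));     /* heap copy: writable */
}

int main(void)
{
        ctl_table_no_const *t = sd_alloc_ctl_entry(2);

        if (!t)
                return 1;
        t[0].procname = "sched_domain";         /* fine through the alias */
        t[0].mode = 0555;
        /* sd_ctl_root.mode = 0755; -- rejected: object is const */
        printf("%s %04o (root: %s)\n", t[0].procname, t[0].mode,
               sd_ctl_root.procname);
        free(t);
        return 0;
}
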
94281diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
94282index fea7d33..84faa94 100644
94283--- a/kernel/sched/fair.c
94284+++ b/kernel/sched/fair.c
94285@@ -1857,7 +1857,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
94286
94287 static void reset_ptenuma_scan(struct task_struct *p)
94288 {
94289- ACCESS_ONCE(p->mm->numa_scan_seq)++;
94290+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
94291 p->mm->numa_scan_offset = 0;
94292 }
94293
94294@@ -7289,7 +7289,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
94295 * run_rebalance_domains is triggered when needed from the scheduler tick.
94296 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
94297 */
94298-static void run_rebalance_domains(struct softirq_action *h)
94299+static __latent_entropy void run_rebalance_domains(void)
94300 {
94301 struct rq *this_rq = this_rq();
94302 enum cpu_idle_type idle = this_rq->idle_balance ?
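
run_rebalance_domains() is additionally marked __latent_entropy: the latent_entropy GCC plugin instruments marked functions to fold hard-to-predict local state into a global pool, and a softirq that fires constantly under load is a good harvesting point. A crude user-space model of the injected mixing -- the multiplier, pool, and clock source are all illustrative, since the real plugin generates its mixing code automatically:

/* sketch: what __latent_entropy instrumentation roughly amounts to */
#include <stdio.h>
#include <time.h>

static unsigned long latent_entropy_pool;

#define mix_latent_entropy(v) \
        (latent_entropy_pool = latent_entropy_pool * 6364136223846793005UL + (v))

static void run_rebalance_domains_sketch(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);    /* stand-in for jittery state */
        mix_latent_entropy((unsigned long)ts.tv_nsec);
        /* ... actual load balancing would happen here ... */
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                run_rebalance_domains_sketch();
        printf("pool=%lx\n", latent_entropy_pool);
        return 0;
}
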
94303diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
94304index 31cc02e..734fb85 100644
94305--- a/kernel/sched/sched.h
94306+++ b/kernel/sched/sched.h
94307@@ -1153,7 +1153,7 @@ struct sched_class {
94308 #ifdef CONFIG_FAIR_GROUP_SCHED
94309 void (*task_move_group) (struct task_struct *p, int on_rq);
94310 #endif
94311-};
94312+} __do_const;
94313
94314 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
94315 {
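
struct sched_class is a function-pointer ops table, the classic target for kernel write-what-where exploits; __do_const asks the constify plugin to force every instance into rodata. A plain-C stand-in, with const replacing the plugin attribute:

/* sketch: an ops table whose function pointers cannot be retargeted */
#include <stdio.h>

struct sched_class_sketch {
        void (*enqueue_task)(int cpu);
        void (*dequeue_task)(int cpu);
};      /* kernel: } __do_const; */

static void enq(int cpu) { printf("enqueue on %d\n", cpu); }
static void deq(int cpu) { printf("dequeue on %d\n", cpu); }

static const struct sched_class_sketch fair_sched_class = {
        .enqueue_task = enq,
        .dequeue_task = deq,
};

int main(void)
{
        fair_sched_class.enqueue_task(0);
        fair_sched_class.dequeue_task(0);
        /* fair_sched_class.enqueue_task = evil; -- does not compile */
        return 0;
}
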
94316diff --git a/kernel/seccomp.c b/kernel/seccomp.c
94317index 301bbc2..eda2da3 100644
94318--- a/kernel/seccomp.c
94319+++ b/kernel/seccomp.c
94320@@ -39,7 +39,7 @@
94321 * is only needed for handling filters shared across tasks.
94322 * @prev: points to a previously installed, or inherited, filter
94323 * @len: the number of instructions in the program
94324- * @insnsi: the BPF program instructions to evaluate
94325+ * @insns: the BPF program instructions to evaluate
94326 *
94327 * seccomp_filter objects are organized in a tree linked via the @prev
94328 * pointer. For any task, it appears to be a singly-linked list starting
94329@@ -54,32 +54,61 @@
94330 struct seccomp_filter {
94331 atomic_t usage;
94332 struct seccomp_filter *prev;
94333- struct sk_filter *prog;
94334+ unsigned short len; /* Instruction count */
94335+ struct sock_filter insns[];
94336 };
94337
94338 /* Limit any path through the tree to 256KB worth of instructions. */
94339 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
94340
94341-/*
94342+/**
94343+ * get_u32 - returns a u32 offset into data
94344+ * @data: a unsigned 64 bit value
94345+ * @index: 0 or 1 to return the first or second 32-bits
94346+ *
94347+ * This inline exists to hide the length of unsigned long. If a 32-bit
94348+ * unsigned long is passed in, it will be extended and the top 32-bits will be
94349+ * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
94350+ * properly returned.
94351+ *
94352 * Endianness is explicitly ignored and left for BPF program authors to manage
94353 * as per the specific architecture.
94354 */
94355-static void populate_seccomp_data(struct seccomp_data *sd)
94356+static inline u32 get_u32(u64 data, int index)
94357 {
94358- struct task_struct *task = current;
94359- struct pt_regs *regs = task_pt_regs(task);
94360- unsigned long args[6];
94361+ return ((u32 *)&data)[index];
94362+}
94363
94364- sd->nr = syscall_get_nr(task, regs);
94365- sd->arch = syscall_get_arch();
94366- syscall_get_arguments(task, regs, 0, 6, args);
94367- sd->args[0] = args[0];
94368- sd->args[1] = args[1];
94369- sd->args[2] = args[2];
94370- sd->args[3] = args[3];
94371- sd->args[4] = args[4];
94372- sd->args[5] = args[5];
94373- sd->instruction_pointer = KSTK_EIP(task);
94374+/* Helper for bpf_load below. */
94375+#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
94376+/**
94377+ * bpf_load: checks and returns a pointer to the requested offset
94378+ * @off: offset into struct seccomp_data to load from
94379+ *
94380+ * Returns the requested 32-bits of data.
94381+ * seccomp_check_filter() should assure that @off is 32-bit aligned
94382+ * and not out of bounds. Failure to do so is a BUG.
94383+ */
94384+u32 seccomp_bpf_load(int off)
94385+{
94386+ struct pt_regs *regs = task_pt_regs(current);
94387+ if (off == BPF_DATA(nr))
94388+ return syscall_get_nr(current, regs);
94389+ if (off == BPF_DATA(arch))
94390+ return syscall_get_arch();
94391+ if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
94392+ unsigned long value;
94393+ int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
94394+ int index = !!(off % sizeof(u64));
94395+ syscall_get_arguments(current, regs, arg, 1, &value);
94396+ return get_u32(value, index);
94397+ }
94398+ if (off == BPF_DATA(instruction_pointer))
94399+ return get_u32(KSTK_EIP(current), 0);
94400+ if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
94401+ return get_u32(KSTK_EIP(current), 1);
94402+ /* seccomp_check_filter should make this impossible. */
94403+ BUG();
94404 }
94405
94406 /**
94407@@ -103,59 +132,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
94408 u32 k = ftest->k;
94409
94410 switch (code) {
94411- case BPF_LD | BPF_W | BPF_ABS:
94412- ftest->code = BPF_LDX | BPF_W | BPF_ABS;
94413+ case BPF_S_LD_W_ABS:
94414+ ftest->code = BPF_S_ANC_SECCOMP_LD_W;
94415 /* 32-bit aligned and not out of bounds. */
94416 if (k >= sizeof(struct seccomp_data) || k & 3)
94417 return -EINVAL;
94418 continue;
94419- case BPF_LD | BPF_W | BPF_LEN:
94420- ftest->code = BPF_LD | BPF_IMM;
94421+ case BPF_S_LD_W_LEN:
94422+ ftest->code = BPF_S_LD_IMM;
94423 ftest->k = sizeof(struct seccomp_data);
94424 continue;
94425- case BPF_LDX | BPF_W | BPF_LEN:
94426- ftest->code = BPF_LDX | BPF_IMM;
94427+ case BPF_S_LDX_W_LEN:
94428+ ftest->code = BPF_S_LDX_IMM;
94429 ftest->k = sizeof(struct seccomp_data);
94430 continue;
94431 /* Explicitly include allowed calls. */
94432- case BPF_RET | BPF_K:
94433- case BPF_RET | BPF_A:
94434- case BPF_ALU | BPF_ADD | BPF_K:
94435- case BPF_ALU | BPF_ADD | BPF_X:
94436- case BPF_ALU | BPF_SUB | BPF_K:
94437- case BPF_ALU | BPF_SUB | BPF_X:
94438- case BPF_ALU | BPF_MUL | BPF_K:
94439- case BPF_ALU | BPF_MUL | BPF_X:
94440- case BPF_ALU | BPF_DIV | BPF_K:
94441- case BPF_ALU | BPF_DIV | BPF_X:
94442- case BPF_ALU | BPF_AND | BPF_K:
94443- case BPF_ALU | BPF_AND | BPF_X:
94444- case BPF_ALU | BPF_OR | BPF_K:
94445- case BPF_ALU | BPF_OR | BPF_X:
94446- case BPF_ALU | BPF_XOR | BPF_K:
94447- case BPF_ALU | BPF_XOR | BPF_X:
94448- case BPF_ALU | BPF_LSH | BPF_K:
94449- case BPF_ALU | BPF_LSH | BPF_X:
94450- case BPF_ALU | BPF_RSH | BPF_K:
94451- case BPF_ALU | BPF_RSH | BPF_X:
94452- case BPF_ALU | BPF_NEG:
94453- case BPF_LD | BPF_IMM:
94454- case BPF_LDX | BPF_IMM:
94455- case BPF_MISC | BPF_TAX:
94456- case BPF_MISC | BPF_TXA:
94457- case BPF_LD | BPF_MEM:
94458- case BPF_LDX | BPF_MEM:
94459- case BPF_ST:
94460- case BPF_STX:
94461- case BPF_JMP | BPF_JA:
94462- case BPF_JMP | BPF_JEQ | BPF_K:
94463- case BPF_JMP | BPF_JEQ | BPF_X:
94464- case BPF_JMP | BPF_JGE | BPF_K:
94465- case BPF_JMP | BPF_JGE | BPF_X:
94466- case BPF_JMP | BPF_JGT | BPF_K:
94467- case BPF_JMP | BPF_JGT | BPF_X:
94468- case BPF_JMP | BPF_JSET | BPF_K:
94469- case BPF_JMP | BPF_JSET | BPF_X:
94470+ case BPF_S_RET_K:
94471+ case BPF_S_RET_A:
94472+ case BPF_S_ALU_ADD_K:
94473+ case BPF_S_ALU_ADD_X:
94474+ case BPF_S_ALU_SUB_K:
94475+ case BPF_S_ALU_SUB_X:
94476+ case BPF_S_ALU_MUL_K:
94477+ case BPF_S_ALU_MUL_X:
94478+ case BPF_S_ALU_DIV_X:
94479+ case BPF_S_ALU_AND_K:
94480+ case BPF_S_ALU_AND_X:
94481+ case BPF_S_ALU_OR_K:
94482+ case BPF_S_ALU_OR_X:
94483+ case BPF_S_ALU_XOR_K:
94484+ case BPF_S_ALU_XOR_X:
94485+ case BPF_S_ALU_LSH_K:
94486+ case BPF_S_ALU_LSH_X:
94487+ case BPF_S_ALU_RSH_K:
94488+ case BPF_S_ALU_RSH_X:
94489+ case BPF_S_ALU_NEG:
94490+ case BPF_S_LD_IMM:
94491+ case BPF_S_LDX_IMM:
94492+ case BPF_S_MISC_TAX:
94493+ case BPF_S_MISC_TXA:
94494+ case BPF_S_ALU_DIV_K:
94495+ case BPF_S_LD_MEM:
94496+ case BPF_S_LDX_MEM:
94497+ case BPF_S_ST:
94498+ case BPF_S_STX:
94499+ case BPF_S_JMP_JA:
94500+ case BPF_S_JMP_JEQ_K:
94501+ case BPF_S_JMP_JEQ_X:
94502+ case BPF_S_JMP_JGE_K:
94503+ case BPF_S_JMP_JGE_X:
94504+ case BPF_S_JMP_JGT_K:
94505+ case BPF_S_JMP_JGT_X:
94506+ case BPF_S_JMP_JSET_K:
94507+ case BPF_S_JMP_JSET_X:
94508 continue;
94509 default:
94510 return -EINVAL;
94511@@ -173,22 +202,18 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
94512 static u32 seccomp_run_filters(int syscall)
94513 {
94514 struct seccomp_filter *f;
94515- struct seccomp_data sd;
94516 u32 ret = SECCOMP_RET_ALLOW;
94517
94518 /* Ensure unexpected behavior doesn't result in failing open. */
94519 if (WARN_ON(current->seccomp.filter == NULL))
94520 return SECCOMP_RET_KILL;
94521
94522- populate_seccomp_data(&sd);
94523-
94524 /*
94525 * All filters in the list are evaluated and the lowest BPF return
94526 * value always takes priority (ignoring the DATA).
94527 */
94528 for (f = current->seccomp.filter; f; f = f->prev) {
94529- u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
94530-
94531+ u32 cur_ret = sk_run_filter(NULL, f->insns);
94532 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
94533 ret = cur_ret;
94534 }
94535@@ -206,20 +231,18 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
94536 struct seccomp_filter *filter;
94537 unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
94538 unsigned long total_insns = fprog->len;
94539- struct sock_filter *fp;
94540- int new_len;
94541 long ret;
94542
94543 if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
94544 return -EINVAL;
94545
94546 for (filter = current->seccomp.filter; filter; filter = filter->prev)
94547- total_insns += filter->prog->len + 4; /* include a 4 instr penalty */
94548+ total_insns += filter->len + 4; /* include a 4 instr penalty */
94549 if (total_insns > MAX_INSNS_PER_PATH)
94550 return -ENOMEM;
94551
94552 /*
94553- * Installing a seccomp filter requires that the task has
94554+ * Installing a seccomp filter requires that the task have
94555 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
94556 * This avoids scenarios where unprivileged tasks can affect the
94557 * behavior of privileged children.
94558@@ -229,51 +252,28 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
94559 CAP_SYS_ADMIN) != 0)
94560 return -EACCES;
94561
94562- fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
94563- if (!fp)
94564- return -ENOMEM;
94565-
94566- /* Copy the instructions from fprog. */
94567- ret = -EFAULT;
94568- if (copy_from_user(fp, fprog->filter, fp_size))
94569- goto free_prog;
94570-
94571- /* Check and rewrite the fprog via the skb checker */
94572- ret = sk_chk_filter(fp, fprog->len);
94573- if (ret)
94574- goto free_prog;
94575-
94576- /* Check and rewrite the fprog for seccomp use */
94577- ret = seccomp_check_filter(fp, fprog->len);
94578- if (ret)
94579- goto free_prog;
94580-
94581- /* Convert 'sock_filter' insns to 'sock_filter_int' insns */
94582- ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
94583- if (ret)
94584- goto free_prog;
94585-
94586 /* Allocate a new seccomp_filter */
94587- ret = -ENOMEM;
94588- filter = kzalloc(sizeof(struct seccomp_filter),
94589+ filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
94590 GFP_KERNEL|__GFP_NOWARN);
94591 if (!filter)
94592- goto free_prog;
94593-
94594- filter->prog = kzalloc(sk_filter_size(new_len),
94595- GFP_KERNEL|__GFP_NOWARN);
94596- if (!filter->prog)
94597- goto free_filter;
94598-
94599- ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
94600- if (ret)
94601- goto free_filter_prog;
94602- kfree(fp);
94603-
94604+ return -ENOMEM;
94605 atomic_set(&filter->usage, 1);
94606- filter->prog->len = new_len;
94607+ filter->len = fprog->len;
94608
94609- sk_filter_select_runtime(filter->prog);
94610+ /* Copy the instructions from fprog. */
94611+ ret = -EFAULT;
94612+ if (copy_from_user(filter->insns, fprog->filter, fp_size))
94613+ goto fail;
94614+
94615+ /* Check and rewrite the fprog via the skb checker */
94616+ ret = sk_chk_filter(filter->insns, filter->len);
94617+ if (ret)
94618+ goto fail;
94619+
94620+ /* Check and rewrite the fprog for seccomp use */
94621+ ret = seccomp_check_filter(filter->insns, filter->len);
94622+ if (ret)
94623+ goto fail;
94624
94625 /*
94626 * If there is an existing filter, make it the prev and don't drop its
94627@@ -282,13 +282,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
94628 filter->prev = current->seccomp.filter;
94629 current->seccomp.filter = filter;
94630 return 0;
94631-
94632-free_filter_prog:
94633- kfree(filter->prog);
94634-free_filter:
94635+fail:
94636 kfree(filter);
94637-free_prog:
94638- kfree(fp);
94639 return ret;
94640 }
94641
94642@@ -298,7 +293,7 @@ free_prog:
94643 *
94644 * Returns 0 on success and non-zero otherwise.
94645 */
94646-static long seccomp_attach_user_filter(char __user *user_filter)
94647+long seccomp_attach_user_filter(char __user *user_filter)
94648 {
94649 struct sock_fprog fprog;
94650 long ret = -EFAULT;
94651@@ -337,7 +332,6 @@ void put_seccomp_filter(struct task_struct *tsk)
94652 while (orig && atomic_dec_and_test(&orig->usage)) {
94653 struct seccomp_filter *freeme = orig;
94654 orig = orig->prev;
94655- sk_filter_free(freeme->prog);
94656 kfree(freeme);
94657 }
94658 }
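
This large seccomp.c hunk is effectively a revert: 3.16 ran seccomp filters through the new internal-BPF engine (an sk_filter program executed with SK_RUN_FILTER), and the patch restores the pre-3.15 classic implementation -- a sock_filter array embedded in seccomp_filter, validated by sk_chk_filter() and seccomp_check_filter(), and run via sk_run_filter() with the BPF_S_ANC_SECCOMP_LD_W load hook -- presumably to keep seccomp off the freshly introduced eBPF converter and allocator paths. The restored helpers can be exercised in user space; struct seccomp_data matches include/uapi/linux/seccomp.h, get_u32() is the restored helper essentially verbatim, and like the kernel this should be built with -fno-strict-aliasing:

/* sketch: the offset-based load helpers the patch brings back */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>

struct seccomp_data {
        int nr;
        uint32_t arch;
        uint64_t instruction_pointer;
        uint64_t args[6];
};

#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)

static inline uint32_t get_u32(uint64_t data, int index)
{
        return ((uint32_t *)&data)[index];      /* host-endian halves */
}

int main(void)
{
        uint64_t arg = 0x1122334455667788ULL;

        /* on little-endian, index 0 is the low half of the argument */
        printf("lo=%08" PRIx32 " hi=%08" PRIx32 "\n",
               get_u32(arg, 0), get_u32(arg, 1));
        /* seccomp_check_filter() only admits 32-bit-aligned loads */
        printf("args[0] offset %zu, aligned: %d\n",
               BPF_DATA(args[0]), (int)(BPF_DATA(args[0]) % 4 == 0));
        return 0;
}
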
94659diff --git a/kernel/signal.c b/kernel/signal.c
94660index a4077e9..f0d4e5c 100644
94661--- a/kernel/signal.c
94662+++ b/kernel/signal.c
94663@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
94664
94665 int print_fatal_signals __read_mostly;
94666
94667-static void __user *sig_handler(struct task_struct *t, int sig)
94668+static __sighandler_t sig_handler(struct task_struct *t, int sig)
94669 {
94670 return t->sighand->action[sig - 1].sa.sa_handler;
94671 }
94672
94673-static int sig_handler_ignored(void __user *handler, int sig)
94674+static int sig_handler_ignored(__sighandler_t handler, int sig)
94675 {
94676 /* Is it explicitly or implicitly ignored? */
94677 return handler == SIG_IGN ||
94678@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
94679
94680 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
94681 {
94682- void __user *handler;
94683+ __sighandler_t handler;
94684
94685 handler = sig_handler(t, sig);
94686
94687@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
94688 atomic_inc(&user->sigpending);
94689 rcu_read_unlock();
94690
94691+ if (!override_rlimit)
94692+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
94693+
94694 if (override_rlimit ||
94695 atomic_read(&user->sigpending) <=
94696 task_rlimit(t, RLIMIT_SIGPENDING)) {
94697@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
94698
94699 int unhandled_signal(struct task_struct *tsk, int sig)
94700 {
94701- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
94702+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
94703 if (is_global_init(tsk))
94704 return 1;
94705 if (handler != SIG_IGN && handler != SIG_DFL)
94706@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
94707 }
94708 }
94709
94710+ /* allow glibc communication via tgkill to other threads in our
94711+ thread group */
94712+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
94713+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
94714+ && gr_handle_signal(t, sig))
94715+ return -EPERM;
94716+
94717 return security_task_kill(t, info, sig, 0);
94718 }
94719
94720@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94721 return send_signal(sig, info, p, 1);
94722 }
94723
94724-static int
94725+int
94726 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94727 {
94728 return send_signal(sig, info, t, 0);
94729@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94730 unsigned long int flags;
94731 int ret, blocked, ignored;
94732 struct k_sigaction *action;
94733+ int is_unhandled = 0;
94734
94735 spin_lock_irqsave(&t->sighand->siglock, flags);
94736 action = &t->sighand->action[sig-1];
94737@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94738 }
94739 if (action->sa.sa_handler == SIG_DFL)
94740 t->signal->flags &= ~SIGNAL_UNKILLABLE;
94741+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
94742+ is_unhandled = 1;
94743 ret = specific_send_sig_info(sig, info, t);
94744 spin_unlock_irqrestore(&t->sighand->siglock, flags);
94745
94746+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
94747+ normal operation */
94748+ if (is_unhandled) {
94749+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
94750+ gr_handle_crash(t, sig);
94751+ }
94752+
94753 return ret;
94754 }
94755
94756@@ -1296,8 +1316,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94757 ret = check_kill_permission(sig, info, p);
94758 rcu_read_unlock();
94759
94760- if (!ret && sig)
94761+ if (!ret && sig) {
94762 ret = do_send_sig_info(sig, info, p, true);
94763+ if (!ret)
94764+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
94765+ }
94766
94767 return ret;
94768 }
94769@@ -2903,7 +2926,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
94770 int error = -ESRCH;
94771
94772 rcu_read_lock();
94773- p = find_task_by_vpid(pid);
94774+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
94775+ /* allow glibc communication via tgkill to other threads in our
94776+ thread group */
94777+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
94778+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
94779+ p = find_task_by_vpid_unrestricted(pid);
94780+ else
94781+#endif
94782+ p = find_task_by_vpid(pid);
94783 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
94784 error = check_kill_permission(sig, info, p);
94785 /*
94786@@ -3236,8 +3267,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
94787 }
94788 seg = get_fs();
94789 set_fs(KERNEL_DS);
94790- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
94791- (stack_t __force __user *) &uoss,
94792+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
94793+ (stack_t __force_user *) &uoss,
94794 compat_user_stack_pointer());
94795 set_fs(seg);
94796 if (ret >= 0 && uoss_ptr) {
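
Beyond the gr_* hooks (RLIMIT_SIGPENDING learning, chroot signal policy, crash logging, and a narrow chroot-findtask exception so glibc's internal setxid signal, SIGRTMIN+1 sent via tgkill, still reaches sibling threads), the signal.c hunks retype sig_handler() from void __user * to __sighandler_t, so the SIG_IGN/SIG_DFL comparisons are checked against a real function-pointer type instead of a bare pointer. A user-space sketch of that comparison -- khandler_t and on_int are illustrative names:

/* sketch: comparing a handler against the special SIG_IGN value */
#include <signal.h>
#include <stdio.h>

typedef void (*khandler_t)(int);        /* stand-in for __sighandler_t */

/* mirrors sig_handler_ignored(): pointer compare against the sentinel */
static int handler_ignored(khandler_t handler)
{
        return handler == SIG_IGN;      /* explicit ignore */
}

static void on_int(int sig) { (void)sig; }

int main(void)
{
        printf("SIG_IGN ignored? %d\n", handler_ignored(SIG_IGN));
        printf("on_int  ignored? %d\n", handler_ignored(on_int));
        return 0;
}
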
94797diff --git a/kernel/smpboot.c b/kernel/smpboot.c
94798index eb89e18..a4e6792 100644
94799--- a/kernel/smpboot.c
94800+++ b/kernel/smpboot.c
94801@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
94802 }
94803 smpboot_unpark_thread(plug_thread, cpu);
94804 }
94805- list_add(&plug_thread->list, &hotplug_threads);
94806+ pax_list_add(&plug_thread->list, &hotplug_threads);
94807 out:
94808 mutex_unlock(&smpboot_threads_lock);
94809 return ret;
94810@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
94811 {
94812 get_online_cpus();
94813 mutex_lock(&smpboot_threads_lock);
94814- list_del(&plug_thread->list);
94815+ pax_list_del(&plug_thread->list);
94816 smpboot_destroy_threads(plug_thread);
94817 mutex_unlock(&smpboot_threads_lock);
94818 put_online_cpus();
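
pax_list_add()/pax_list_del() are grsecurity's hardened list primitives: they sanity-check the neighbouring pointers before relinking, in the spirit of mainline CONFIG_DEBUG_LIST, and can write through otherwise read-only nodes. A user-space sketch of the validation half -- the exact check and the abort policy are modeled, not copied:

/* sketch: a list add that refuses to link through corrupted neighbours */
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void __list_add_checked(struct list_head *new,
                               struct list_head *prev, struct list_head *next)
{
        if (next->prev != prev || prev->next != next) {
                fprintf(stderr, "list corruption detected, refusing add\n");
                abort();
        }
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

static void list_add_checked(struct list_head *new, struct list_head *head)
{
        __list_add_checked(new, head, head->next);
}

int main(void)
{
        struct list_head hotplug_threads = LIST_HEAD_INIT(hotplug_threads);
        struct list_head thread;

        list_add_checked(&thread, &hotplug_threads);
        printf("first entry: %p (expect &thread = %p)\n",
               (void *)hotplug_threads.next, (void *)&thread);
        return 0;
}
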
94819diff --git a/kernel/softirq.c b/kernel/softirq.c
94820index 5918d22..e95d1926 100644
94821--- a/kernel/softirq.c
94822+++ b/kernel/softirq.c
94823@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
94824 EXPORT_SYMBOL(irq_stat);
94825 #endif
94826
94827-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
94828+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
94829
94830 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
94831
94832@@ -266,7 +266,7 @@ restart:
94833 kstat_incr_softirqs_this_cpu(vec_nr);
94834
94835 trace_softirq_entry(vec_nr);
94836- h->action(h);
94837+ h->action();
94838 trace_softirq_exit(vec_nr);
94839 if (unlikely(prev_count != preempt_count())) {
94840 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
94841@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
94842 or_softirq_pending(1UL << nr);
94843 }
94844
94845-void open_softirq(int nr, void (*action)(struct softirq_action *))
94846+void __init open_softirq(int nr, void (*action)(void))
94847 {
94848 softirq_vec[nr].action = action;
94849 }
94850@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
94851 }
94852 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
94853
94854-static void tasklet_action(struct softirq_action *a)
94855+static void tasklet_action(void)
94856 {
94857 struct tasklet_struct *list;
94858
94859@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
94860 }
94861 }
94862
94863-static void tasklet_hi_action(struct softirq_action *a)
94864+static __latent_entropy void tasklet_hi_action(void)
94865 {
94866 struct tasklet_struct *list;
94867
94868@@ -741,7 +741,7 @@ static struct notifier_block cpu_nfb = {
94869 .notifier_call = cpu_callback
94870 };
94871
94872-static struct smp_hotplug_thread softirq_threads = {
94873+static struct smp_hotplug_thread softirq_threads __read_only = {
94874 .store = &ksoftirqd,
94875 .thread_should_run = ksoftirqd_should_run,
94876 .thread_fn = run_ksoftirqd,
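
The softirq changes above all serve one pattern: the handler table becomes write-protected after boot (__read_only, page-aligned so it can occupy its own read-only mapping), registration is confined to __init code, and the unused struct softirq_action argument is dropped so a corrupted pointer cannot be smuggled in through it. A minimal sketch of the same pattern, assuming the __read_only attribute supplied elsewhere in this patch; register_handler and dispatch are illustrative names, not from the patch:

	typedef void (*softirq_fn)(void);

	/* populated only during init, then enforced read-only */
	static softirq_fn handlers[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);

	void __init register_handler(unsigned int nr, softirq_fn fn)
	{
		handlers[nr] = fn;	/* writes are still legal this early in boot */
	}

	static void dispatch(unsigned int nr)
	{
		handlers[nr]();		/* handlers take no argument an attacker could forge */
	}
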
94877diff --git a/kernel/sys.c b/kernel/sys.c
94878index 66a751e..a42497e 100644
94879--- a/kernel/sys.c
94880+++ b/kernel/sys.c
94881@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
94882 error = -EACCES;
94883 goto out;
94884 }
94885+
94886+ if (gr_handle_chroot_setpriority(p, niceval)) {
94887+ error = -EACCES;
94888+ goto out;
94889+ }
94890+
94891 no_nice = security_task_setnice(p, niceval);
94892 if (no_nice) {
94893 error = no_nice;
94894@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
94895 goto error;
94896 }
94897
94898+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
94899+ goto error;
94900+
94901+ if (!gid_eq(new->gid, old->gid)) {
94902+	/* Make sure we generate a learn log for what will
94903+	   end up being a role transition after a full-learning
94904+	   policy is generated.  CAP_SETGID is required to perform
94905+	   a transition, and we may not have logged a CAP_SETGID
94906+	   check above, e.g. in the case where the new rgid equals
94907+	   the old egid.
94908+	*/
94909+ gr_learn_cap(current, new, CAP_SETGID);
94910+ }
94911+
94912 if (rgid != (gid_t) -1 ||
94913 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
94914 new->sgid = new->egid;
94915@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
94916 old = current_cred();
94917
94918 retval = -EPERM;
94919+
94920+ if (gr_check_group_change(kgid, kgid, kgid))
94921+ goto error;
94922+
94923 if (ns_capable(old->user_ns, CAP_SETGID))
94924 new->gid = new->egid = new->sgid = new->fsgid = kgid;
94925 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
94926@@ -403,7 +427,7 @@ error:
94927 /*
94928 * change the user struct in a credentials set to match the new UID
94929 */
94930-static int set_user(struct cred *new)
94931+int set_user(struct cred *new)
94932 {
94933 struct user_struct *new_user;
94934
94935@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
94936 goto error;
94937 }
94938
94939+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
94940+ goto error;
94941+
94942 if (!uid_eq(new->uid, old->uid)) {
94943+	/* Make sure we generate a learn log for what will
94944+	   end up being a role transition after a full-learning
94945+	   policy is generated.  CAP_SETUID is required to perform
94946+	   a transition, and we may not have logged a CAP_SETUID
94947+	   check above, e.g. in the case where the new ruid equals
94948+	   the old euid.
94949+	*/
94950+ gr_learn_cap(current, new, CAP_SETUID);
94951 retval = set_user(new);
94952 if (retval < 0)
94953 goto error;
94954@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94955 old = current_cred();
94956
94957 retval = -EPERM;
94958+
94959+ if (gr_check_crash_uid(kuid))
94960+ goto error;
94961+ if (gr_check_user_change(kuid, kuid, kuid))
94962+ goto error;
94963+
94964 if (ns_capable(old->user_ns, CAP_SETUID)) {
94965 new->suid = new->uid = kuid;
94966 if (!uid_eq(kuid, old->uid)) {
94967@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94968 goto error;
94969 }
94970
94971+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
94972+ goto error;
94973+
94974 if (ruid != (uid_t) -1) {
94975 new->uid = kruid;
94976 if (!uid_eq(kruid, old->uid)) {
94977@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94978 goto error;
94979 }
94980
94981+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
94982+ goto error;
94983+
94984 if (rgid != (gid_t) -1)
94985 new->gid = krgid;
94986 if (egid != (gid_t) -1)
94987@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94988 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
94989 ns_capable(old->user_ns, CAP_SETUID)) {
94990 if (!uid_eq(kuid, old->fsuid)) {
94991+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
94992+ goto error;
94993+
94994 new->fsuid = kuid;
94995 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
94996 goto change_okay;
94997 }
94998 }
94999
95000+error:
95001 abort_creds(new);
95002 return old_fsuid;
95003
95004@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
95005 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
95006 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
95007 ns_capable(old->user_ns, CAP_SETGID)) {
95008+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
95009+ goto error;
95010+
95011 if (!gid_eq(kgid, old->fsgid)) {
95012 new->fsgid = kgid;
95013 goto change_okay;
95014 }
95015 }
95016
95017+error:
95018 abort_creds(new);
95019 return old_fsgid;
95020
95021@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
95022 return -EFAULT;
95023
95024 down_read(&uts_sem);
95025- error = __copy_to_user(&name->sysname, &utsname()->sysname,
95026+ error = __copy_to_user(name->sysname, &utsname()->sysname,
95027 __OLD_UTS_LEN);
95028 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
95029- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
95030+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
95031 __OLD_UTS_LEN);
95032 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
95033- error |= __copy_to_user(&name->release, &utsname()->release,
95034+ error |= __copy_to_user(name->release, &utsname()->release,
95035 __OLD_UTS_LEN);
95036 error |= __put_user(0, name->release + __OLD_UTS_LEN);
95037- error |= __copy_to_user(&name->version, &utsname()->version,
95038+ error |= __copy_to_user(name->version, &utsname()->version,
95039 __OLD_UTS_LEN);
95040 error |= __put_user(0, name->version + __OLD_UTS_LEN);
95041- error |= __copy_to_user(&name->machine, &utsname()->machine,
95042+ error |= __copy_to_user(name->machine, &utsname()->machine,
95043 __OLD_UTS_LEN);
95044 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
95045 up_read(&uts_sem);
95046@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
95047 */
95048 new_rlim->rlim_cur = 1;
95049 }
95050+	/* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
95051+	   is changed to a lower value.  Since tasks can be created by the same
95052+	   user between this limit change and an execve by this task, force
95053+	   a recheck only for this task by setting PF_NPROC_EXCEEDED.
95054+	*/
95055+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
95056+ tsk->flags |= PF_NPROC_EXCEEDED;
95057 }
95058 if (!retval) {
95059 if (old_rlim)
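
PF_NPROC_EXCEEDED defers the RLIMIT_NPROC check rather than failing do_prlimit() itself: the flag marks the task, and the limit is re-evaluated at that task's next execve instead of trusting the check done at fork() time. The recheck looks roughly like the following (a paraphrase of the logic in fs/exec.c, not code from this patch):

	/* at the top of the execve path */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;	/* limit was lowered since fork(); refuse to exec */
		goto out_ret;
	}
	/* the check passed (or never applied); clear the one-shot flag */
	current->flags &= ~PF_NPROC_EXCEEDED;
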
95060diff --git a/kernel/sysctl.c b/kernel/sysctl.c
95061index 75b22e2..65c0ac8 100644
95062--- a/kernel/sysctl.c
95063+++ b/kernel/sysctl.c
95064@@ -94,7 +94,6 @@
95065
95066
95067 #if defined(CONFIG_SYSCTL)
95068-
95069 /* External variables not in a header file. */
95070 extern int max_threads;
95071 extern int suid_dumpable;
95072@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
95073
95074 /* Constants used for minimum and maximum */
95075 #ifdef CONFIG_LOCKUP_DETECTOR
95076-static int sixty = 60;
95077+static int sixty __read_only = 60;
95078 #endif
95079
95080-static int __maybe_unused neg_one = -1;
95081+static int __maybe_unused neg_one __read_only = -1;
95082
95083-static int zero;
95084-static int __maybe_unused one = 1;
95085-static int __maybe_unused two = 2;
95086-static int __maybe_unused four = 4;
95087-static unsigned long one_ul = 1;
95088-static int one_hundred = 100;
95089+static int zero __read_only = 0;
95090+static int __maybe_unused one __read_only = 1;
95091+static int __maybe_unused two __read_only = 2;
95092+static int __maybe_unused three __read_only = 3;
95093+static int __maybe_unused four __read_only = 4;
95094+static unsigned long one_ul __read_only = 1;
95095+static int one_hundred __read_only = 100;
95096 #ifdef CONFIG_PRINTK
95097-static int ten_thousand = 10000;
95098+static int ten_thousand __read_only = 10000;
95099 #endif
95100
95101 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
95102@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
95103 void __user *buffer, size_t *lenp, loff_t *ppos);
95104 #endif
95105
95106-#ifdef CONFIG_PRINTK
95107 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95108 void __user *buffer, size_t *lenp, loff_t *ppos);
95109-#endif
95110
95111 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
95112 void __user *buffer, size_t *lenp, loff_t *ppos);
95113@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
95114
95115 #endif
95116
95117+extern struct ctl_table grsecurity_table[];
95118+
95119 static struct ctl_table kern_table[];
95120 static struct ctl_table vm_table[];
95121 static struct ctl_table fs_table[];
95122@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
95123 int sysctl_legacy_va_layout;
95124 #endif
95125
95126+#ifdef CONFIG_PAX_SOFTMODE
95127+static ctl_table pax_table[] = {
95128+ {
95129+ .procname = "softmode",
95130+ .data = &pax_softmode,
95131+ .maxlen = sizeof(unsigned int),
95132+ .mode = 0600,
95133+ .proc_handler = &proc_dointvec,
95134+ },
95135+
95136+ { }
95137+};
95138+#endif
95139+
95140 /* The default sysctl tables: */
95141
95142 static struct ctl_table sysctl_base_table[] = {
95143@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
95144 #endif
95145
95146 static struct ctl_table kern_table[] = {
95147+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
95148+ {
95149+ .procname = "grsecurity",
95150+ .mode = 0500,
95151+ .child = grsecurity_table,
95152+ },
95153+#endif
95154+
95155+#ifdef CONFIG_PAX_SOFTMODE
95156+ {
95157+ .procname = "pax",
95158+ .mode = 0500,
95159+ .child = pax_table,
95160+ },
95161+#endif
95162+
95163 {
95164 .procname = "sched_child_runs_first",
95165 .data = &sysctl_sched_child_runs_first,
95166@@ -641,7 +671,7 @@ static struct ctl_table kern_table[] = {
95167 .data = &modprobe_path,
95168 .maxlen = KMOD_PATH_LEN,
95169 .mode = 0644,
95170- .proc_handler = proc_dostring,
95171+ .proc_handler = proc_dostring_modpriv,
95172 },
95173 {
95174 .procname = "modules_disabled",
95175@@ -808,16 +838,20 @@ static struct ctl_table kern_table[] = {
95176 .extra1 = &zero,
95177 .extra2 = &one,
95178 },
95179+#endif
95180 {
95181 .procname = "kptr_restrict",
95182 .data = &kptr_restrict,
95183 .maxlen = sizeof(int),
95184 .mode = 0644,
95185 .proc_handler = proc_dointvec_minmax_sysadmin,
95186+#ifdef CONFIG_GRKERNSEC_HIDESYM
95187+ .extra1 = &two,
95188+#else
95189 .extra1 = &zero,
95190+#endif
95191 .extra2 = &two,
95192 },
95193-#endif
95194 {
95195 .procname = "ngroups_max",
95196 .data = &ngroups_max,
95197@@ -1073,10 +1107,17 @@ static struct ctl_table kern_table[] = {
95198 */
95199 {
95200 .procname = "perf_event_paranoid",
95201- .data = &sysctl_perf_event_paranoid,
95202- .maxlen = sizeof(sysctl_perf_event_paranoid),
95203+ .data = &sysctl_perf_event_legitimately_concerned,
95204+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
95205 .mode = 0644,
95206- .proc_handler = proc_dointvec,
95207+ /* go ahead, be a hero */
95208+ .proc_handler = proc_dointvec_minmax_sysadmin,
95209+ .extra1 = &neg_one,
95210+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
95211+ .extra2 = &three,
95212+#else
95213+ .extra2 = &two,
95214+#endif
95215 },
95216 {
95217 .procname = "perf_event_mlock_kb",
95218@@ -1338,6 +1379,13 @@ static struct ctl_table vm_table[] = {
95219 .proc_handler = proc_dointvec_minmax,
95220 .extra1 = &zero,
95221 },
95222+ {
95223+ .procname = "heap_stack_gap",
95224+ .data = &sysctl_heap_stack_gap,
95225+ .maxlen = sizeof(sysctl_heap_stack_gap),
95226+ .mode = 0644,
95227+ .proc_handler = proc_doulongvec_minmax,
95228+ },
95229 #else
95230 {
95231 .procname = "nr_trim_pages",
95232@@ -1827,6 +1875,16 @@ int proc_dostring(struct ctl_table *table, int write,
95233 (char __user *)buffer, lenp, ppos);
95234 }
95235
95236+int proc_dostring_modpriv(struct ctl_table *table, int write,
95237+ void __user *buffer, size_t *lenp, loff_t *ppos)
95238+{
95239+ if (write && !capable(CAP_SYS_MODULE))
95240+ return -EPERM;
95241+
95242+ return _proc_do_string(table->data, table->maxlen, write,
95243+ buffer, lenp, ppos);
95244+}
95245+
95246 static size_t proc_skip_spaces(char **buf)
95247 {
95248 size_t ret;
95249@@ -1932,6 +1990,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
95250 len = strlen(tmp);
95251 if (len > *size)
95252 len = *size;
95253+ if (len > sizeof(tmp))
95254+ len = sizeof(tmp);
95255 if (copy_to_user(*buf, tmp, len))
95256 return -EFAULT;
95257 *size -= len;
95258@@ -2109,7 +2169,7 @@ int proc_dointvec(struct ctl_table *table, int write,
95259 static int proc_taint(struct ctl_table *table, int write,
95260 void __user *buffer, size_t *lenp, loff_t *ppos)
95261 {
95262- struct ctl_table t;
95263+ ctl_table_no_const t;
95264 unsigned long tmptaint = get_taint();
95265 int err;
95266
95267@@ -2137,7 +2197,6 @@ static int proc_taint(struct ctl_table *table, int write,
95268 return err;
95269 }
95270
95271-#ifdef CONFIG_PRINTK
95272 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95273 void __user *buffer, size_t *lenp, loff_t *ppos)
95274 {
95275@@ -2146,7 +2205,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95276
95277 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
95278 }
95279-#endif
95280
95281 struct do_proc_dointvec_minmax_conv_param {
95282 int *min;
95283@@ -2706,6 +2764,12 @@ int proc_dostring(struct ctl_table *table, int write,
95284 return -ENOSYS;
95285 }
95286
95287+int proc_dostring_modpriv(struct ctl_table *table, int write,
95288+ void __user *buffer, size_t *lenp, loff_t *ppos)
95289+{
95290+ return -ENOSYS;
95291+}
95292+
95293 int proc_dointvec(struct ctl_table *table, int write,
95294 void __user *buffer, size_t *lenp, loff_t *ppos)
95295 {
95296@@ -2762,5 +2826,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
95297 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
95298 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
95299 EXPORT_SYMBOL(proc_dostring);
95300+EXPORT_SYMBOL(proc_dostring_modpriv);
95301 EXPORT_SYMBOL(proc_doulongvec_minmax);
95302 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
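
proc_dostring_modpriv exists so that string sysctls which influence module loading (modprobe_path above) refuse writes from anyone lacking CAP_SYS_MODULE while reads behave exactly like proc_dostring. Wiring it up is just a handler swap in the table; a hedged sketch, where module_paths is an illustrative table name and the field values mirror the modprobe entry above:

	static struct ctl_table module_paths[] = {
		{
			.procname	= "modprobe",
			.data		= &modprobe_path,
			.maxlen		= KMOD_PATH_LEN,
			.mode		= 0644,
			.proc_handler	= proc_dostring_modpriv,	/* write => -EPERM without CAP_SYS_MODULE */
		},
		{ }
	};
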
95303diff --git a/kernel/taskstats.c b/kernel/taskstats.c
95304index 13d2f7c..c93d0b0 100644
95305--- a/kernel/taskstats.c
95306+++ b/kernel/taskstats.c
95307@@ -28,9 +28,12 @@
95308 #include <linux/fs.h>
95309 #include <linux/file.h>
95310 #include <linux/pid_namespace.h>
95311+#include <linux/grsecurity.h>
95312 #include <net/genetlink.h>
95313 #include <linux/atomic.h>
95314
95315+extern int gr_is_taskstats_denied(int pid);
95316+
95317 /*
95318 * Maximum length of a cpumask that can be specified in
95319 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
95320@@ -576,6 +579,9 @@ err:
95321
95322 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
95323 {
95324+ if (gr_is_taskstats_denied(current->pid))
95325+ return -EACCES;
95326+
95327 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
95328 return cmd_attr_register_cpumask(info);
95329 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
95330diff --git a/kernel/time.c b/kernel/time.c
95331index 7c7964c..2a0d412 100644
95332--- a/kernel/time.c
95333+++ b/kernel/time.c
95334@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
95335 return error;
95336
95337 if (tz) {
95338+		/* do_settimeofday(), called below when tv is set, logs the
95339+		   change itself, so only log here when it won't be called */
95340+ if (!tv)
95341+ gr_log_timechange();
95342+
95343 sys_tz = *tz;
95344 update_vsyscall_tz();
95345 if (firsttime) {
95346diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
95347index fe75444..190c528 100644
95348--- a/kernel/time/alarmtimer.c
95349+++ b/kernel/time/alarmtimer.c
95350@@ -811,7 +811,7 @@ static int __init alarmtimer_init(void)
95351 struct platform_device *pdev;
95352 int error = 0;
95353 int i;
95354- struct k_clock alarm_clock = {
95355+ static struct k_clock alarm_clock = {
95356 .clock_getres = alarm_clock_getres,
95357 .clock_get = alarm_clock_get,
95358 .timer_create = alarm_timer_create,
95359diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
95360index 32d8d6a..11486af 100644
95361--- a/kernel/time/timekeeping.c
95362+++ b/kernel/time/timekeeping.c
95363@@ -15,6 +15,7 @@
95364 #include <linux/init.h>
95365 #include <linux/mm.h>
95366 #include <linux/sched.h>
95367+#include <linux/grsecurity.h>
95368 #include <linux/syscore_ops.h>
95369 #include <linux/clocksource.h>
95370 #include <linux/jiffies.h>
95371@@ -502,6 +503,8 @@ int do_settimeofday(const struct timespec *tv)
95372 if (!timespec_valid_strict(tv))
95373 return -EINVAL;
95374
95375+ gr_log_timechange();
95376+
95377 raw_spin_lock_irqsave(&timekeeper_lock, flags);
95378 write_seqcount_begin(&timekeeper_seq);
95379
95380diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
95381index 61ed862..3b52c65 100644
95382--- a/kernel/time/timer_list.c
95383+++ b/kernel/time/timer_list.c
95384@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
95385
95386 static void print_name_offset(struct seq_file *m, void *sym)
95387 {
95388+#ifdef CONFIG_GRKERNSEC_HIDESYM
95389+ SEQ_printf(m, "<%p>", NULL);
95390+#else
95391 char symname[KSYM_NAME_LEN];
95392
95393 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
95394 SEQ_printf(m, "<%pK>", sym);
95395 else
95396 SEQ_printf(m, "%s", symname);
95397+#endif
95398 }
95399
95400 static void
95401@@ -119,7 +123,11 @@ next_one:
95402 static void
95403 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
95404 {
95405+#ifdef CONFIG_GRKERNSEC_HIDESYM
95406+ SEQ_printf(m, " .base: %p\n", NULL);
95407+#else
95408 SEQ_printf(m, " .base: %pK\n", base);
95409+#endif
95410 SEQ_printf(m, " .index: %d\n",
95411 base->index);
95412 SEQ_printf(m, " .resolution: %Lu nsecs\n",
95413@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
95414 {
95415 struct proc_dir_entry *pe;
95416
95417+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95418+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
95419+#else
95420 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
95421+#endif
95422 if (!pe)
95423 return -ENOMEM;
95424 return 0;
95425diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
95426index 1fb08f2..ca4bb1e 100644
95427--- a/kernel/time/timer_stats.c
95428+++ b/kernel/time/timer_stats.c
95429@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
95430 static unsigned long nr_entries;
95431 static struct entry entries[MAX_ENTRIES];
95432
95433-static atomic_t overflow_count;
95434+static atomic_unchecked_t overflow_count;
95435
95436 /*
95437 * The entries are in a hash-table, for fast lookup:
95438@@ -140,7 +140,7 @@ static void reset_entries(void)
95439 nr_entries = 0;
95440 memset(entries, 0, sizeof(entries));
95441 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
95442- atomic_set(&overflow_count, 0);
95443+ atomic_set_unchecked(&overflow_count, 0);
95444 }
95445
95446 static struct entry *alloc_entry(void)
95447@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95448 if (likely(entry))
95449 entry->count++;
95450 else
95451- atomic_inc(&overflow_count);
95452+ atomic_inc_unchecked(&overflow_count);
95453
95454 out_unlock:
95455 raw_spin_unlock_irqrestore(lock, flags);
95456@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95457
95458 static void print_name_offset(struct seq_file *m, unsigned long addr)
95459 {
95460+#ifdef CONFIG_GRKERNSEC_HIDESYM
95461+ seq_printf(m, "<%p>", NULL);
95462+#else
95463 char symname[KSYM_NAME_LEN];
95464
95465 if (lookup_symbol_name(addr, symname) < 0)
95466- seq_printf(m, "<%p>", (void *)addr);
95467+ seq_printf(m, "<%pK>", (void *)addr);
95468 else
95469 seq_printf(m, "%s", symname);
95470+#endif
95471 }
95472
95473 static int tstats_show(struct seq_file *m, void *v)
95474@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
95475
95476 seq_puts(m, "Timer Stats Version: v0.3\n");
95477 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
95478- if (atomic_read(&overflow_count))
95479- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
95480+ if (atomic_read_unchecked(&overflow_count))
95481+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
95482 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
95483
95484 for (i = 0; i < nr_entries; i++) {
95485@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
95486 {
95487 struct proc_dir_entry *pe;
95488
95489+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95490+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
95491+#else
95492 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
95493+#endif
95494 if (!pe)
95495 return -ENOMEM;
95496 return 0;
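
The atomic_t to atomic_unchecked_t conversions in this file (and in blktrace, trace_clock, and trace_mmiotrace below) are opt-outs from the PaX REFCOUNT defense, which traps on signed atomic overflow to stop reference-count exploits. Counters that are pure statistics can wrap harmlessly, so they use the unchecked variants. The pattern, as a minimal sketch assuming the PaX types added elsewhere in this patch:

	static atomic_unchecked_t dropped;	/* statistics only; wraparound is benign */

	static void record_drop(void)
	{
		atomic_inc_unchecked(&dropped);	/* no overflow trap on this counter */
	}

	static unsigned int read_drops(void)
	{
		return atomic_read_unchecked(&dropped);
	}
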
95497diff --git a/kernel/timer.c b/kernel/timer.c
95498index 3bb01a3..0e7760e 100644
95499--- a/kernel/timer.c
95500+++ b/kernel/timer.c
95501@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
95502 /*
95503 * This function runs timers and the timer-tq in bottom half context.
95504 */
95505-static void run_timer_softirq(struct softirq_action *h)
95506+static __latent_entropy void run_timer_softirq(void)
95507 {
95508 struct tvec_base *base = __this_cpu_read(tvec_bases);
95509
95510@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
95511 *
95512 * In all cases the return value is guaranteed to be non-negative.
95513 */
95514-signed long __sched schedule_timeout(signed long timeout)
95515+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
95516 {
95517 struct timer_list timer;
95518 unsigned long expire;
95519diff --git a/kernel/torture.c b/kernel/torture.c
95520index 40bb511..91190b9 100644
95521--- a/kernel/torture.c
95522+++ b/kernel/torture.c
95523@@ -484,7 +484,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
95524 mutex_lock(&fullstop_mutex);
95525 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
95526 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
95527- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
95528+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
95529 } else {
95530 pr_warn("Concurrent rmmod and shutdown illegal!\n");
95531 }
95532@@ -551,14 +551,14 @@ static int torture_stutter(void *arg)
95533 if (!torture_must_stop()) {
95534 if (stutter > 1) {
95535 schedule_timeout_interruptible(stutter - 1);
95536- ACCESS_ONCE(stutter_pause_test) = 2;
95537+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
95538 }
95539 schedule_timeout_interruptible(1);
95540- ACCESS_ONCE(stutter_pause_test) = 1;
95541+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
95542 }
95543 if (!torture_must_stop())
95544 schedule_timeout_interruptible(stutter);
95545- ACCESS_ONCE(stutter_pause_test) = 0;
95546+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
95547 torture_shutdown_absorb("torture_stutter");
95548 } while (!torture_must_stop());
95549 torture_kthread_stopping("torture_stutter");
95550@@ -645,7 +645,7 @@ bool torture_cleanup(void)
95551 schedule_timeout_uninterruptible(10);
95552 return true;
95553 }
95554- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
95555+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
95556 mutex_unlock(&fullstop_mutex);
95557 torture_shutdown_cleanup();
95558 torture_shuffle_cleanup();
95559diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
95560index c1bd4ad..4b861dc 100644
95561--- a/kernel/trace/blktrace.c
95562+++ b/kernel/trace/blktrace.c
95563@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
95564 struct blk_trace *bt = filp->private_data;
95565 char buf[16];
95566
95567- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
95568+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
95569
95570 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
95571 }
95572@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
95573 return 1;
95574
95575 bt = buf->chan->private_data;
95576- atomic_inc(&bt->dropped);
95577+ atomic_inc_unchecked(&bt->dropped);
95578 return 0;
95579 }
95580
95581@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
95582
95583 bt->dir = dir;
95584 bt->dev = dev;
95585- atomic_set(&bt->dropped, 0);
95586+ atomic_set_unchecked(&bt->dropped, 0);
95587 INIT_LIST_HEAD(&bt->running_list);
95588
95589 ret = -EIO;
95590diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
95591index ac9d1da..ce98b35 100644
95592--- a/kernel/trace/ftrace.c
95593+++ b/kernel/trace/ftrace.c
95594@@ -1920,12 +1920,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
95595 if (unlikely(ftrace_disabled))
95596 return 0;
95597
95598+ ret = ftrace_arch_code_modify_prepare();
95599+ FTRACE_WARN_ON(ret);
95600+ if (ret)
95601+ return 0;
95602+
95603 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
95604+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
95605 if (ret) {
95606 ftrace_bug(ret, ip);
95607- return 0;
95608 }
95609- return 1;
95610+ return ret ? 0 : 1;
95611 }
95612
95613 /*
95614@@ -4126,8 +4131,10 @@ static int ftrace_process_locs(struct module *mod,
95615 if (!count)
95616 return 0;
95617
95618+ pax_open_kernel();
95619 sort(start, count, sizeof(*start),
95620 ftrace_cmp_ips, ftrace_swap_ips);
95621+ pax_close_kernel();
95622
95623 start_pg = ftrace_allocate_pages(count);
95624 if (!start_pg)
95625diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
95626index ff70271..4242e69 100644
95627--- a/kernel/trace/ring_buffer.c
95628+++ b/kernel/trace/ring_buffer.c
95629@@ -352,9 +352,9 @@ struct buffer_data_page {
95630 */
95631 struct buffer_page {
95632 struct list_head list; /* list of buffer pages */
95633- local_t write; /* index for next write */
95634+ local_unchecked_t write; /* index for next write */
95635 unsigned read; /* index for next read */
95636- local_t entries; /* entries on this page */
95637+ local_unchecked_t entries; /* entries on this page */
95638 unsigned long real_end; /* real end of data */
95639 struct buffer_data_page *page; /* Actual data page */
95640 };
95641@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
95642 unsigned long last_overrun;
95643 local_t entries_bytes;
95644 local_t entries;
95645- local_t overrun;
95646- local_t commit_overrun;
95647+ local_unchecked_t overrun;
95648+ local_unchecked_t commit_overrun;
95649 local_t dropped_events;
95650 local_t committing;
95651 local_t commits;
95652@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
95653 work = &cpu_buffer->irq_work;
95654 }
95655
95656- work->waiters_pending = true;
95657 poll_wait(filp, &work->waiters, poll_table);
95658+ work->waiters_pending = true;
95659+ /*
95660+ * There's a tight race between setting the waiters_pending and
95661+ * checking if the ring buffer is empty. Once the waiters_pending bit
95662+ * is set, the next event will wake the task up, but we can get stuck
95663+ * if there's only a single event in.
95664+ *
95665+ * FIXME: Ideally, we need a memory barrier on the writer side as well,
95666+ * but adding a memory barrier to all events will cause too much of a
95667+ * performance hit in the fast path. We only need a memory barrier when
95668+ * the buffer goes from empty to having content. But as this race is
95669+ * extremely small, and it's not a problem if another event comes in, we
95670+ * will fix it later.
95671+ */
95672+ smp_mb();
95673
95674 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
95675 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
95676@@ -991,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
95677 *
95678 * We add a counter to the write field to denote this.
95679 */
95680- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
95681- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
95682+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
95683+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
95684
95685 /*
95686 * Just make sure we have seen our old_write and synchronize
95687@@ -1020,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
95688 * cmpxchg to only update if an interrupt did not already
95689 * do it for us. If the cmpxchg fails, we don't care.
95690 */
95691- (void)local_cmpxchg(&next_page->write, old_write, val);
95692- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
95693+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
95694+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
95695
95696 /*
95697 * No need to worry about races with clearing out the commit.
95698@@ -1388,12 +1402,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
95699
95700 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
95701 {
95702- return local_read(&bpage->entries) & RB_WRITE_MASK;
95703+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
95704 }
95705
95706 static inline unsigned long rb_page_write(struct buffer_page *bpage)
95707 {
95708- return local_read(&bpage->write) & RB_WRITE_MASK;
95709+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
95710 }
95711
95712 static int
95713@@ -1488,7 +1502,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
95714 * bytes consumed in ring buffer from here.
95715 * Increment overrun to account for the lost events.
95716 */
95717- local_add(page_entries, &cpu_buffer->overrun);
95718+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
95719 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
95720 }
95721
95722@@ -2066,7 +2080,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
95723 * it is our responsibility to update
95724 * the counters.
95725 */
95726- local_add(entries, &cpu_buffer->overrun);
95727+ local_add_unchecked(entries, &cpu_buffer->overrun);
95728 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
95729
95730 /*
95731@@ -2216,7 +2230,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95732 if (tail == BUF_PAGE_SIZE)
95733 tail_page->real_end = 0;
95734
95735- local_sub(length, &tail_page->write);
95736+ local_sub_unchecked(length, &tail_page->write);
95737 return;
95738 }
95739
95740@@ -2251,7 +2265,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95741 rb_event_set_padding(event);
95742
95743 /* Set the write back to the previous setting */
95744- local_sub(length, &tail_page->write);
95745+ local_sub_unchecked(length, &tail_page->write);
95746 return;
95747 }
95748
95749@@ -2263,7 +2277,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95750
95751 /* Set write to end of buffer */
95752 length = (tail + length) - BUF_PAGE_SIZE;
95753- local_sub(length, &tail_page->write);
95754+ local_sub_unchecked(length, &tail_page->write);
95755 }
95756
95757 /*
95758@@ -2289,7 +2303,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
95759 * about it.
95760 */
95761 if (unlikely(next_page == commit_page)) {
95762- local_inc(&cpu_buffer->commit_overrun);
95763+ local_inc_unchecked(&cpu_buffer->commit_overrun);
95764 goto out_reset;
95765 }
95766
95767@@ -2345,7 +2359,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
95768 cpu_buffer->tail_page) &&
95769 (cpu_buffer->commit_page ==
95770 cpu_buffer->reader_page))) {
95771- local_inc(&cpu_buffer->commit_overrun);
95772+ local_inc_unchecked(&cpu_buffer->commit_overrun);
95773 goto out_reset;
95774 }
95775 }
95776@@ -2393,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
95777 length += RB_LEN_TIME_EXTEND;
95778
95779 tail_page = cpu_buffer->tail_page;
95780- write = local_add_return(length, &tail_page->write);
95781+ write = local_add_return_unchecked(length, &tail_page->write);
95782
95783 /* set write to only the index of the write */
95784 write &= RB_WRITE_MASK;
95785@@ -2417,7 +2431,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
95786 kmemcheck_annotate_bitfield(event, bitfield);
95787 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
95788
95789- local_inc(&tail_page->entries);
95790+ local_inc_unchecked(&tail_page->entries);
95791
95792 /*
95793 * If this is the first commit on the page, then update
95794@@ -2450,7 +2464,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
95795
95796 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
95797 unsigned long write_mask =
95798- local_read(&bpage->write) & ~RB_WRITE_MASK;
95799+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
95800 unsigned long event_length = rb_event_length(event);
95801 /*
95802 * This is on the tail page. It is possible that
95803@@ -2460,7 +2474,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
95804 */
95805 old_index += write_mask;
95806 new_index += write_mask;
95807- index = local_cmpxchg(&bpage->write, old_index, new_index);
95808+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
95809 if (index == old_index) {
95810 /* update counters */
95811 local_sub(event_length, &cpu_buffer->entries_bytes);
95812@@ -2852,7 +2866,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95813
95814 /* Do the likely case first */
95815 if (likely(bpage->page == (void *)addr)) {
95816- local_dec(&bpage->entries);
95817+ local_dec_unchecked(&bpage->entries);
95818 return;
95819 }
95820
95821@@ -2864,7 +2878,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95822 start = bpage;
95823 do {
95824 if (bpage->page == (void *)addr) {
95825- local_dec(&bpage->entries);
95826+ local_dec_unchecked(&bpage->entries);
95827 return;
95828 }
95829 rb_inc_page(cpu_buffer, &bpage);
95830@@ -3148,7 +3162,7 @@ static inline unsigned long
95831 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
95832 {
95833 return local_read(&cpu_buffer->entries) -
95834- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
95835+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
95836 }
95837
95838 /**
95839@@ -3237,7 +3251,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
95840 return 0;
95841
95842 cpu_buffer = buffer->buffers[cpu];
95843- ret = local_read(&cpu_buffer->overrun);
95844+ ret = local_read_unchecked(&cpu_buffer->overrun);
95845
95846 return ret;
95847 }
95848@@ -3260,7 +3274,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
95849 return 0;
95850
95851 cpu_buffer = buffer->buffers[cpu];
95852- ret = local_read(&cpu_buffer->commit_overrun);
95853+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
95854
95855 return ret;
95856 }
95857@@ -3345,7 +3359,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
95858 /* if you care about this being correct, lock the buffer */
95859 for_each_buffer_cpu(buffer, cpu) {
95860 cpu_buffer = buffer->buffers[cpu];
95861- overruns += local_read(&cpu_buffer->overrun);
95862+ overruns += local_read_unchecked(&cpu_buffer->overrun);
95863 }
95864
95865 return overruns;
95866@@ -3521,8 +3535,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95867 /*
95868 * Reset the reader page to size zero.
95869 */
95870- local_set(&cpu_buffer->reader_page->write, 0);
95871- local_set(&cpu_buffer->reader_page->entries, 0);
95872+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
95873+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
95874 local_set(&cpu_buffer->reader_page->page->commit, 0);
95875 cpu_buffer->reader_page->real_end = 0;
95876
95877@@ -3556,7 +3570,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95878 * want to compare with the last_overrun.
95879 */
95880 smp_mb();
95881- overwrite = local_read(&(cpu_buffer->overrun));
95882+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
95883
95884 /*
95885 * Here's the tricky part.
95886@@ -4126,8 +4140,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
95887
95888 cpu_buffer->head_page
95889 = list_entry(cpu_buffer->pages, struct buffer_page, list);
95890- local_set(&cpu_buffer->head_page->write, 0);
95891- local_set(&cpu_buffer->head_page->entries, 0);
95892+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
95893+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
95894 local_set(&cpu_buffer->head_page->page->commit, 0);
95895
95896 cpu_buffer->head_page->read = 0;
95897@@ -4137,14 +4151,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
95898
95899 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
95900 INIT_LIST_HEAD(&cpu_buffer->new_pages);
95901- local_set(&cpu_buffer->reader_page->write, 0);
95902- local_set(&cpu_buffer->reader_page->entries, 0);
95903+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
95904+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
95905 local_set(&cpu_buffer->reader_page->page->commit, 0);
95906 cpu_buffer->reader_page->read = 0;
95907
95908 local_set(&cpu_buffer->entries_bytes, 0);
95909- local_set(&cpu_buffer->overrun, 0);
95910- local_set(&cpu_buffer->commit_overrun, 0);
95911+ local_set_unchecked(&cpu_buffer->overrun, 0);
95912+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
95913 local_set(&cpu_buffer->dropped_events, 0);
95914 local_set(&cpu_buffer->entries, 0);
95915 local_set(&cpu_buffer->committing, 0);
95916@@ -4549,8 +4563,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
95917 rb_init_page(bpage);
95918 bpage = reader->page;
95919 reader->page = *data_page;
95920- local_set(&reader->write, 0);
95921- local_set(&reader->entries, 0);
95922+ local_set_unchecked(&reader->write, 0);
95923+ local_set_unchecked(&reader->entries, 0);
95924 reader->read = 0;
95925 *data_page = bpage;
95926
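
Besides the local_t to local_unchecked_t statistics conversions, the hunk above reorders ring_buffer_poll_wait(): the poller must be queued on the waitqueue before waiters_pending is published, and the smp_mb() orders that store against the subsequent emptiness check. Without the ordering, a writer can observe waiters_pending == false while the poller observes an empty buffer, and the wakeup is lost. Schematically (condensed from the code above):

	poll_wait(filp, &work->waiters, poll_table);	/* 1: queue ourselves */
	work->waiters_pending = true;			/* 2: publish that we are waiting */
	smp_mb();					/* 3: order the store vs. the read below */
	if (!ring_buffer_empty(buffer))			/* 4: only now test for data */
		return POLLIN | POLLRDNORM;
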
95927diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
95928index 291397e..db3836d 100644
95929--- a/kernel/trace/trace.c
95930+++ b/kernel/trace/trace.c
95931@@ -3510,7 +3510,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
95932 return 0;
95933 }
95934
95935-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
95936+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
95937 {
95938 /* do nothing if flag is already set */
95939 if (!!(trace_flags & mask) == !!enabled)
95940diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
95941index 9258f5a..9b1e41e 100644
95942--- a/kernel/trace/trace.h
95943+++ b/kernel/trace/trace.h
95944@@ -1278,7 +1278,7 @@ extern const char *__stop___tracepoint_str[];
95945 void trace_printk_init_buffers(void);
95946 void trace_printk_start_comm(void);
95947 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
95948-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
95949+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
95950
95951 /*
95952 * Normal trace_printk() and friends allocates special buffers
95953diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
95954index 57b67b1..66082a9 100644
95955--- a/kernel/trace/trace_clock.c
95956+++ b/kernel/trace/trace_clock.c
95957@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
95958 return now;
95959 }
95960
95961-static atomic64_t trace_counter;
95962+static atomic64_unchecked_t trace_counter;
95963
95964 /*
95965 * trace_clock_counter(): simply an atomic counter.
95966@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
95967 */
95968 u64 notrace trace_clock_counter(void)
95969 {
95970- return atomic64_add_return(1, &trace_counter);
95971+ return atomic64_inc_return_unchecked(&trace_counter);
95972 }
95973diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
95974index 2de5362..c4c7003 100644
95975--- a/kernel/trace/trace_events.c
95976+++ b/kernel/trace/trace_events.c
95977@@ -1722,7 +1722,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
95978 return 0;
95979 }
95980
95981-struct ftrace_module_file_ops;
95982 static void __add_event_to_tracers(struct ftrace_event_call *call);
95983
95984 /* Add an additional event_call dynamically */
95985diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95986index 0abd9b8..6a663a2 100644
95987--- a/kernel/trace/trace_mmiotrace.c
95988+++ b/kernel/trace/trace_mmiotrace.c
95989@@ -24,7 +24,7 @@ struct header_iter {
95990 static struct trace_array *mmio_trace_array;
95991 static bool overrun_detected;
95992 static unsigned long prev_overruns;
95993-static atomic_t dropped_count;
95994+static atomic_unchecked_t dropped_count;
95995
95996 static void mmio_reset_data(struct trace_array *tr)
95997 {
95998@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
95999
96000 static unsigned long count_overruns(struct trace_iterator *iter)
96001 {
96002- unsigned long cnt = atomic_xchg(&dropped_count, 0);
96003+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
96004 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
96005
96006 if (over > prev_overruns)
96007@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
96008 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
96009 sizeof(*entry), 0, pc);
96010 if (!event) {
96011- atomic_inc(&dropped_count);
96012+ atomic_inc_unchecked(&dropped_count);
96013 return;
96014 }
96015 entry = ring_buffer_event_data(event);
96016@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
96017 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
96018 sizeof(*entry), 0, pc);
96019 if (!event) {
96020- atomic_inc(&dropped_count);
96021+ atomic_inc_unchecked(&dropped_count);
96022 return;
96023 }
96024 entry = ring_buffer_event_data(event);
96025diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
96026index f3dad80..d291d61 100644
96027--- a/kernel/trace/trace_output.c
96028+++ b/kernel/trace/trace_output.c
96029@@ -322,7 +322,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
96030
96031 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
96032 if (!IS_ERR(p)) {
96033- p = mangle_path(s->buffer + s->len, p, "\n");
96034+ p = mangle_path(s->buffer + s->len, p, "\n\\");
96035 if (p) {
96036 s->len = p - s->buffer;
96037 return 1;
96038@@ -980,14 +980,16 @@ int register_ftrace_event(struct trace_event *event)
96039 goto out;
96040 }
96041
96042+ pax_open_kernel();
96043 if (event->funcs->trace == NULL)
96044- event->funcs->trace = trace_nop_print;
96045+ *(void **)&event->funcs->trace = trace_nop_print;
96046 if (event->funcs->raw == NULL)
96047- event->funcs->raw = trace_nop_print;
96048+ *(void **)&event->funcs->raw = trace_nop_print;
96049 if (event->funcs->hex == NULL)
96050- event->funcs->hex = trace_nop_print;
96051+ *(void **)&event->funcs->hex = trace_nop_print;
96052 if (event->funcs->binary == NULL)
96053- event->funcs->binary = trace_nop_print;
96054+ *(void **)&event->funcs->binary = trace_nop_print;
96055+ pax_close_kernel();
96056
96057 key = event->type & (EVENT_HASHSIZE - 1);
96058
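
register_ftrace_event() now patches default callbacks into a structure that the constify plugin has made read-only, so the writes are bracketed by pax_open_kernel()/pax_close_kernel() and routed through a *(void **)& cast that strips the const qualifier the compiler would otherwise enforce. The bracket idiom in isolation (ops, handler, and default_handler are illustrative names, not from the patch):

	pax_open_kernel();		/* temporarily allow writes to constified data */
	if (ops->handler == NULL)
		*(void **)&ops->handler = default_handler;	/* cast defeats const-ness */
	pax_close_kernel();		/* restore write protection */
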
96059diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
96060index 8a4e5cb..64f270d 100644
96061--- a/kernel/trace/trace_stack.c
96062+++ b/kernel/trace/trace_stack.c
96063@@ -91,7 +91,7 @@ check_stack(unsigned long ip, unsigned long *stack)
96064 return;
96065
96066 /* we do not handle interrupt stacks yet */
96067- if (!object_is_on_stack(stack))
96068+ if (!object_starts_on_stack(stack))
96069 return;
96070
96071 local_irq_save(flags);
96072diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
96073index fcc0256..aee880f 100644
96074--- a/kernel/user_namespace.c
96075+++ b/kernel/user_namespace.c
96076@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
96077 !kgid_has_mapping(parent_ns, group))
96078 return -EPERM;
96079
96080+#ifdef CONFIG_GRKERNSEC
96081+	/*
96082+	 * This doesn't really inspire confidence:
96083+	 * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
96084+	 * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
96085+	 * It increases the kernel attack surface in areas developers
96086+	 * previously cared little about ("low importance due to
96087+	 * requiring 'root' capability").
96088+	 * To be removed when this code receives *proper* review.
96089+	 */
96090+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
96091+ !capable(CAP_SETGID))
96092+ return -EPERM;
96093+#endif
96094+
96095 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
96096 if (!ns)
96097 return -ENOMEM;
96098@@ -872,7 +887,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
96099 if (atomic_read(&current->mm->mm_users) > 1)
96100 return -EINVAL;
96101
96102- if (current->fs->users != 1)
96103+ if (atomic_read(&current->fs->users) != 1)
96104 return -EINVAL;
96105
96106 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
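
The CONFIG_GRKERNSEC gate above inverts the premise of unprivileged user namespaces: creation is only allowed when the caller already holds the capabilities the namespace would otherwise grant it over itself. Expressed as a predicate (a sketch for illustration, not a helper that exists in the patch):

	static bool userns_creation_allowed(void)
	{
		/* require the real capabilities up front instead of
		 * handing them out namespace-locally */
		return capable(CAP_SYS_ADMIN) &&
		       capable(CAP_SETUID) &&
		       capable(CAP_SETGID);
	}
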
96107diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
96108index c8eac43..4b5f08f 100644
96109--- a/kernel/utsname_sysctl.c
96110+++ b/kernel/utsname_sysctl.c
96111@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
96112 static int proc_do_uts_string(struct ctl_table *table, int write,
96113 void __user *buffer, size_t *lenp, loff_t *ppos)
96114 {
96115- struct ctl_table uts_table;
96116+ ctl_table_no_const uts_table;
96117 int r;
96118 memcpy(&uts_table, table, sizeof(uts_table));
96119 uts_table.data = get_uts(table, write);
96120diff --git a/kernel/watchdog.c b/kernel/watchdog.c
96121index c3319bd..67efc3c 100644
96122--- a/kernel/watchdog.c
96123+++ b/kernel/watchdog.c
96124@@ -518,7 +518,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
96125 static void watchdog_nmi_disable(unsigned int cpu) { return; }
96126 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
96127
96128-static struct smp_hotplug_thread watchdog_threads = {
96129+static struct smp_hotplug_thread watchdog_threads __read_only = {
96130 .store = &softlockup_watchdog,
96131 .thread_should_run = watchdog_should_run,
96132 .thread_fn = watchdog,
96133diff --git a/kernel/workqueue.c b/kernel/workqueue.c
96134index 35974ac..43c9e87 100644
96135--- a/kernel/workqueue.c
96136+++ b/kernel/workqueue.c
96137@@ -4576,7 +4576,7 @@ static void rebind_workers(struct worker_pool *pool)
96138 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
96139 worker_flags |= WORKER_REBOUND;
96140 worker_flags &= ~WORKER_UNBOUND;
96141- ACCESS_ONCE(worker->flags) = worker_flags;
96142+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
96143 }
96144
96145 spin_unlock_irq(&pool->lock);
96146diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
96147index 7a638aa..20db901 100644
96148--- a/lib/Kconfig.debug
96149+++ b/lib/Kconfig.debug
96150@@ -858,7 +858,7 @@ config DEBUG_MUTEXES
96151
96152 config DEBUG_WW_MUTEX_SLOWPATH
96153 bool "Wait/wound mutex debugging: Slowpath testing"
96154- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96155+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96156 select DEBUG_LOCK_ALLOC
96157 select DEBUG_SPINLOCK
96158 select DEBUG_MUTEXES
96159@@ -871,7 +871,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
96160
96161 config DEBUG_LOCK_ALLOC
96162 bool "Lock debugging: detect incorrect freeing of live locks"
96163- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96164+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96165 select DEBUG_SPINLOCK
96166 select DEBUG_MUTEXES
96167 select LOCKDEP
96168@@ -885,7 +885,7 @@ config DEBUG_LOCK_ALLOC
96169
96170 config PROVE_LOCKING
96171 bool "Lock debugging: prove locking correctness"
96172- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96173+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96174 select LOCKDEP
96175 select DEBUG_SPINLOCK
96176 select DEBUG_MUTEXES
96177@@ -936,7 +936,7 @@ config LOCKDEP
96178
96179 config LOCK_STAT
96180 bool "Lock usage statistics"
96181- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96182+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96183 select LOCKDEP
96184 select DEBUG_SPINLOCK
96185 select DEBUG_MUTEXES
96186@@ -1418,6 +1418,7 @@ config LATENCYTOP
96187 depends on DEBUG_KERNEL
96188 depends on STACKTRACE_SUPPORT
96189 depends on PROC_FS
96190+ depends on !GRKERNSEC_HIDESYM
96191 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
96192 select KALLSYMS
96193 select KALLSYMS_ALL
96194@@ -1434,7 +1435,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96195 config DEBUG_STRICT_USER_COPY_CHECKS
96196 bool "Strict user copy size checks"
96197 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96198- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
96199+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
96200 help
96201 Enabling this option turns a certain set of sanity checks for user
96202 copy operations into compile time failures.
96203@@ -1554,7 +1555,7 @@ endmenu # runtime tests
96204
96205 config PROVIDE_OHCI1394_DMA_INIT
96206 bool "Remote debugging over FireWire early on boot"
96207- depends on PCI && X86
96208+ depends on PCI && X86 && !GRKERNSEC
96209 help
96210 If you want to debug problems which hang or crash the kernel early
96211 on boot and the crashing machine has a FireWire port, you can use
96212diff --git a/lib/Makefile b/lib/Makefile
96213index ba967a1..2cc869a 100644
96214--- a/lib/Makefile
96215+++ b/lib/Makefile
96216@@ -33,7 +33,6 @@ obj-y += kstrtox.o
96217 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
96218 obj-$(CONFIG_TEST_MODULE) += test_module.o
96219 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
96220-obj-$(CONFIG_TEST_BPF) += test_bpf.o
96221
96222 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
96223 CFLAGS_kobject.o += -DDEBUG
96224@@ -54,7 +53,7 @@ obj-$(CONFIG_BTREE) += btree.o
96225 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
96226 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
96227 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
96228-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
96229+obj-y += list_debug.o
96230 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
96231
96232 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
96233diff --git a/lib/average.c b/lib/average.c
96234index 114d1be..ab0350c 100644
96235--- a/lib/average.c
96236+++ b/lib/average.c
96237@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
96238 {
96239 unsigned long internal = ACCESS_ONCE(avg->internal);
96240
96241- ACCESS_ONCE(avg->internal) = internal ?
96242+ ACCESS_ONCE_RW(avg->internal) = internal ?
96243 (((internal << avg->weight) - internal) +
96244 (val << avg->factor)) >> avg->weight :
96245 (val << avg->factor);
96246diff --git a/lib/bitmap.c b/lib/bitmap.c
96247index 06f7e4f..f3cf2b0 100644
96248--- a/lib/bitmap.c
96249+++ b/lib/bitmap.c
96250@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
96251 {
96252 int c, old_c, totaldigits, ndigits, nchunks, nbits;
96253 u32 chunk;
96254- const char __user __force *ubuf = (const char __user __force *)buf;
96255+ const char __user *ubuf = (const char __force_user *)buf;
96256
96257 bitmap_zero(maskp, nmaskbits);
96258
96259@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
96260 {
96261 if (!access_ok(VERIFY_READ, ubuf, ulen))
96262 return -EFAULT;
96263- return __bitmap_parse((const char __force *)ubuf,
96264+ return __bitmap_parse((const char __force_kernel *)ubuf,
96265 ulen, 1, maskp, nmaskbits);
96266
96267 }
96268@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
96269 {
96270 unsigned a, b;
96271 int c, old_c, totaldigits;
96272- const char __user __force *ubuf = (const char __user __force *)buf;
96273+ const char __user *ubuf = (const char __force_user *)buf;
96274 int exp_digit, in_range;
96275
96276 totaldigits = c = 0;
96277@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
96278 {
96279 if (!access_ok(VERIFY_READ, ubuf, ulen))
96280 return -EFAULT;
96281- return __bitmap_parselist((const char __force *)ubuf,
96282+ return __bitmap_parselist((const char __force_kernel *)ubuf,
96283 ulen, 1, maskp, nmaskbits);
96284 }
96285 EXPORT_SYMBOL(bitmap_parselist_user);
96286diff --git a/lib/bug.c b/lib/bug.c
96287index d1d7c78..b354235 100644
96288--- a/lib/bug.c
96289+++ b/lib/bug.c
96290@@ -137,6 +137,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
96291 return BUG_TRAP_TYPE_NONE;
96292
96293 bug = find_bug(bugaddr);
96294+ if (!bug)
96295+ return BUG_TRAP_TYPE_NONE;
96296
96297 file = NULL;
96298 line = 0;
96299diff --git a/lib/debugobjects.c b/lib/debugobjects.c
96300index 547f7f9..a6d4ba0 100644
96301--- a/lib/debugobjects.c
96302+++ b/lib/debugobjects.c
96303@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
96304 if (limit > 4)
96305 return;
96306
96307- is_on_stack = object_is_on_stack(addr);
96308+ is_on_stack = object_starts_on_stack(addr);
96309 if (is_on_stack == onstack)
96310 return;
96311
96312diff --git a/lib/div64.c b/lib/div64.c
96313index 4382ad7..08aa558 100644
96314--- a/lib/div64.c
96315+++ b/lib/div64.c
96316@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
96317 EXPORT_SYMBOL(__div64_32);
96318
96319 #ifndef div_s64_rem
96320-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96321+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96322 {
96323 u64 quotient;
96324
96325@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
96326 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
96327 */
96328 #ifndef div64_u64
96329-u64 div64_u64(u64 dividend, u64 divisor)
96330+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
96331 {
96332 u32 high = divisor >> 32;
96333 u64 quot;
96334diff --git a/lib/dma-debug.c b/lib/dma-debug.c
96335index 98f2d7e..899da5c 100644
96336--- a/lib/dma-debug.c
96337+++ b/lib/dma-debug.c
96338@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
96339
96340 void dma_debug_add_bus(struct bus_type *bus)
96341 {
96342- struct notifier_block *nb;
96343+ notifier_block_no_const *nb;
96344
96345 if (global_disable)
96346 return;
96347@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
96348
96349 static void check_for_stack(struct device *dev, void *addr)
96350 {
96351- if (object_is_on_stack(addr))
96352+ if (object_starts_on_stack(addr))
96353 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
96354 "stack [addr=%p]\n", addr);
96355 }
96356diff --git a/lib/hash.c b/lib/hash.c
96357index fea973f..386626f 100644
96358--- a/lib/hash.c
96359+++ b/lib/hash.c
96360@@ -14,7 +14,7 @@
96361 #include <linux/hash.h>
96362 #include <linux/cache.h>
96363
96364-static struct fast_hash_ops arch_hash_ops __read_mostly = {
96365+static struct fast_hash_ops arch_hash_ops __read_only = {
96366 .hash = jhash,
96367 .hash2 = jhash2,
96368 };
96369diff --git a/lib/inflate.c b/lib/inflate.c
96370index 013a761..c28f3fc 100644
96371--- a/lib/inflate.c
96372+++ b/lib/inflate.c
96373@@ -269,7 +269,7 @@ static void free(void *where)
96374 malloc_ptr = free_mem_ptr;
96375 }
96376 #else
96377-#define malloc(a) kmalloc(a, GFP_KERNEL)
96378+#define malloc(a) kmalloc((a), GFP_KERNEL)
96379 #define free(a) kfree(a)
96380 #endif
96381
96382diff --git a/lib/ioremap.c b/lib/ioremap.c
96383index 0c9216c..863bd89 100644
96384--- a/lib/ioremap.c
96385+++ b/lib/ioremap.c
96386@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
96387 unsigned long next;
96388
96389 phys_addr -= addr;
96390- pmd = pmd_alloc(&init_mm, pud, addr);
96391+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
96392 if (!pmd)
96393 return -ENOMEM;
96394 do {
96395@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
96396 unsigned long next;
96397
96398 phys_addr -= addr;
96399- pud = pud_alloc(&init_mm, pgd, addr);
96400+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
96401 if (!pud)
96402 return -ENOMEM;
96403 do {
96404diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
96405index bd2bea9..6b3c95e 100644
96406--- a/lib/is_single_threaded.c
96407+++ b/lib/is_single_threaded.c
96408@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
96409 struct task_struct *p, *t;
96410 bool ret;
96411
96412+ if (!mm)
96413+ return true;
96414+
96415 if (atomic_read(&task->signal->live) != 1)
96416 return false;
96417
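
The added guard handles kernel threads: they run with a NULL mm, so there is no user address space that could be shared and the function can answer true immediately instead of walking other tasks. A small userspace model of the early exit (the structs are stand-ins, not kernel definitions):

    #include <stdbool.h>
    #include <stdio.h>

    struct mm_struct;                     /* opaque, as in the kernel */

    struct task_model {                   /* illustrative stand-in */
        struct mm_struct *mm;             /* NULL for kernel threads */
        int live_threads;
    };

    static bool is_single_threaded_model(const struct task_model *t)
    {
        if (!t->mm)                       /* kernel thread: nothing to share */
            return true;
        return t->live_threads == 1;
    }

    int main(void)
    {
        struct task_model kthread = { .mm = NULL, .live_threads = 1 };
        printf("kthread single-threaded: %d\n",
               is_single_threaded_model(&kthread));
        return 0;
    }
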
96418diff --git a/lib/kobject.c b/lib/kobject.c
96419index 58751bb..93a1853 100644
96420--- a/lib/kobject.c
96421+++ b/lib/kobject.c
96422@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
96423
96424
96425 static DEFINE_SPINLOCK(kobj_ns_type_lock);
96426-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
96427+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
96428
96429-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
96430+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
96431 {
96432 enum kobj_ns_type type = ops->type;
96433 int error;
96434diff --git a/lib/list_debug.c b/lib/list_debug.c
96435index c24c2f7..f0296f4 100644
96436--- a/lib/list_debug.c
96437+++ b/lib/list_debug.c
96438@@ -11,7 +11,9 @@
96439 #include <linux/bug.h>
96440 #include <linux/kernel.h>
96441 #include <linux/rculist.h>
96442+#include <linux/mm.h>
96443
96444+#ifdef CONFIG_DEBUG_LIST
96445 /*
96446 * Insert a new entry between two known consecutive entries.
96447 *
96448@@ -19,21 +21,40 @@
96449 * the prev/next entries already!
96450 */
96451
96452+static bool __list_add_debug(struct list_head *new,
96453+ struct list_head *prev,
96454+ struct list_head *next)
96455+{
96456+ if (unlikely(next->prev != prev)) {
96457+ printk(KERN_ERR "list_add corruption. next->prev should be "
96458+ "prev (%p), but was %p. (next=%p).\n",
96459+ prev, next->prev, next);
96460+ BUG();
96461+ return false;
96462+ }
96463+ if (unlikely(prev->next != next)) {
96464+ printk(KERN_ERR "list_add corruption. prev->next should be "
96465+ "next (%p), but was %p. (prev=%p).\n",
96466+ next, prev->next, prev);
96467+ BUG();
96468+ return false;
96469+ }
96470+ if (unlikely(new == prev || new == next)) {
96471+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
96472+ new, prev, next);
96473+ BUG();
96474+ return false;
96475+ }
96476+ return true;
96477+}
96478+
96479 void __list_add(struct list_head *new,
96480- struct list_head *prev,
96481- struct list_head *next)
96482+ struct list_head *prev,
96483+ struct list_head *next)
96484 {
96485- WARN(next->prev != prev,
96486- "list_add corruption. next->prev should be "
96487- "prev (%p), but was %p. (next=%p).\n",
96488- prev, next->prev, next);
96489- WARN(prev->next != next,
96490- "list_add corruption. prev->next should be "
96491- "next (%p), but was %p. (prev=%p).\n",
96492- next, prev->next, prev);
96493- WARN(new == prev || new == next,
96494- "list_add double add: new=%p, prev=%p, next=%p.\n",
96495- new, prev, next);
96496+ if (!__list_add_debug(new, prev, next))
96497+ return;
96498+
96499 next->prev = new;
96500 new->next = next;
96501 new->prev = prev;
96502@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
96503 }
96504 EXPORT_SYMBOL(__list_add);
96505
96506-void __list_del_entry(struct list_head *entry)
96507+static bool __list_del_entry_debug(struct list_head *entry)
96508 {
96509 struct list_head *prev, *next;
96510
96511 prev = entry->prev;
96512 next = entry->next;
96513
96514- if (WARN(next == LIST_POISON1,
96515- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
96516- entry, LIST_POISON1) ||
96517- WARN(prev == LIST_POISON2,
96518- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
96519- entry, LIST_POISON2) ||
96520- WARN(prev->next != entry,
96521- "list_del corruption. prev->next should be %p, "
96522- "but was %p\n", entry, prev->next) ||
96523- WARN(next->prev != entry,
96524- "list_del corruption. next->prev should be %p, "
96525- "but was %p\n", entry, next->prev))
96526+ if (unlikely(next == LIST_POISON1)) {
96527+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
96528+ entry, LIST_POISON1);
96529+ BUG();
96530+ return false;
96531+ }
96532+ if (unlikely(prev == LIST_POISON2)) {
96533+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
96534+ entry, LIST_POISON2);
96535+ BUG();
96536+ return false;
96537+ }
96538+ if (unlikely(entry->prev->next != entry)) {
96539+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
96540+ "but was %p\n", entry, prev->next);
96541+ BUG();
96542+ return false;
96543+ }
96544+ if (unlikely(entry->next->prev != entry)) {
96545+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
96546+ "but was %p\n", entry, next->prev);
96547+ BUG();
96548+ return false;
96549+ }
96550+ return true;
96551+}
96552+
96553+void __list_del_entry(struct list_head *entry)
96554+{
96555+ if (!__list_del_entry_debug(entry))
96556 return;
96557
96558- __list_del(prev, next);
96559+ __list_del(entry->prev, entry->next);
96560 }
96561 EXPORT_SYMBOL(__list_del_entry);
96562
96563@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
96564 void __list_add_rcu(struct list_head *new,
96565 struct list_head *prev, struct list_head *next)
96566 {
96567- WARN(next->prev != prev,
96568- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
96569- prev, next->prev, next);
96570- WARN(prev->next != next,
96571- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
96572- next, prev->next, prev);
96573+ if (!__list_add_debug(new, prev, next))
96574+ return;
96575+
96576 new->next = next;
96577 new->prev = prev;
96578 rcu_assign_pointer(list_next_rcu(prev), new);
96579 next->prev = new;
96580 }
96581 EXPORT_SYMBOL(__list_add_rcu);
96582+#endif
96583+
96584+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
96585+{
96586+#ifdef CONFIG_DEBUG_LIST
96587+ if (!__list_add_debug(new, prev, next))
96588+ return;
96589+#endif
96590+
96591+ pax_open_kernel();
96592+ next->prev = new;
96593+ new->next = next;
96594+ new->prev = prev;
96595+ prev->next = new;
96596+ pax_close_kernel();
96597+}
96598+EXPORT_SYMBOL(__pax_list_add);
96599+
96600+void pax_list_del(struct list_head *entry)
96601+{
96602+#ifdef CONFIG_DEBUG_LIST
96603+ if (!__list_del_entry_debug(entry))
96604+ return;
96605+#endif
96606+
96607+ pax_open_kernel();
96608+ __list_del(entry->prev, entry->next);
96609+ entry->next = LIST_POISON1;
96610+ entry->prev = LIST_POISON2;
96611+ pax_close_kernel();
96612+}
96613+EXPORT_SYMBOL(pax_list_del);
96614+
96615+void pax_list_del_init(struct list_head *entry)
96616+{
96617+ pax_open_kernel();
96618+ __list_del(entry->prev, entry->next);
96619+ INIT_LIST_HEAD(entry);
96620+ pax_close_kernel();
96621+}
96622+EXPORT_SYMBOL(pax_list_del_init);
96623+
96624+void __pax_list_add_rcu(struct list_head *new,
96625+ struct list_head *prev, struct list_head *next)
96626+{
96627+#ifdef CONFIG_DEBUG_LIST
96628+ if (!__list_add_debug(new, prev, next))
96629+ return;
96630+#endif
96631+
96632+ pax_open_kernel();
96633+ new->next = next;
96634+ new->prev = prev;
96635+ rcu_assign_pointer(list_next_rcu(prev), new);
96636+ next->prev = new;
96637+ pax_close_kernel();
96638+}
96639+EXPORT_SYMBOL(__pax_list_add_rcu);
96640+
96641+void pax_list_del_rcu(struct list_head *entry)
96642+{
96643+#ifdef CONFIG_DEBUG_LIST
96644+ if (!__list_del_entry_debug(entry))
96645+ return;
96646+#endif
96647+
96648+ pax_open_kernel();
96649+ __list_del(entry->prev, entry->next);
96650+ entry->next = LIST_POISON1;
96651+ entry->prev = LIST_POISON2;
96652+ pax_close_kernel();
96653+}
96654+EXPORT_SYMBOL(pax_list_del_rcu);
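
Two things happen in lib/list_debug.c: the WARN-based sanity checks become BUG()-ing helpers shared by all variants, and new pax_list_* primitives let code manipulate lists that KERNEXEC/constify keep read-only, by bracketing the writes with pax_open_kernel()/pax_close_kernel(). The userspace sketch below emulates that bracket with mprotect(); every name and the page-granularity trick are illustrative assumptions, not the kernel mechanism:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct list_head { struct list_head *next, *prev; };

    /* Toggle protection on the page holding p, roughly what
     * pax_open_kernel()/pax_close_kernel() do for __read_only data. */
    static void set_prot(void *p, int prot)
    {
        long psz = sysconf(_SC_PAGESIZE);
        uintptr_t page = (uintptr_t)p & ~(uintptr_t)(psz - 1);
        mprotect((void *)page, (size_t)psz, prot);
    }

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        struct list_head *head = mmap(NULL, (size_t)psz,
                                      PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct list_head node;

        if (head == MAP_FAILED)
            return 1;
        head->next = head->prev = head;
        set_prot(head, PROT_READ);              /* list is now "read-only" */

        set_prot(head, PROT_READ | PROT_WRITE); /* ~pax_open_kernel() */
        node.next = head;
        node.prev = head;
        head->next = head->prev = &node;
        set_prot(head, PROT_READ);              /* ~pax_close_kernel() */

        printf("linked: %d\n", head->next == &node);
        return 0;
    }
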
96655diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
96656index 963b703..438bc51 100644
96657--- a/lib/percpu-refcount.c
96658+++ b/lib/percpu-refcount.c
96659@@ -29,7 +29,7 @@
96660 * can't hit 0 before we've added up all the percpu refs.
96661 */
96662
96663-#define PCPU_COUNT_BIAS (1U << 31)
96664+#define PCPU_COUNT_BIAS (1U << 30)
96665
96666 /**
96667 * percpu_ref_init - initialize a percpu refcount
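
Lowering the bias from 1U << 31 to 1U << 30 matters under the REFCOUNT hardening, which (assumption) flags sign-bit transitions of a 32-bit counter as overflow; a bias that permanently occupies bit 31 would make every biased refcount look overflowed, while 1U << 30 keeps the biased value positive with ample headroom. The arithmetic, as a runnable illustration (the signed reinterpretation below is implementation-defined C, shown only to make the sign bit visible):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t old_bias = 1u << 31;
        uint32_t new_bias = 1u << 30;

        printf("1U << 31 as s32: %d\n", (int32_t)old_bias); /* negative */
        printf("1U << 30 as s32: %d\n", (int32_t)new_bias); /* positive */
        return 0;
    }
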
96668diff --git a/lib/radix-tree.c b/lib/radix-tree.c
96669index 3291a8e..346a91e 100644
96670--- a/lib/radix-tree.c
96671+++ b/lib/radix-tree.c
96672@@ -67,7 +67,7 @@ struct radix_tree_preload {
96673 int nr;
96674 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
96675 };
96676-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
96677+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
96678
96679 static inline void *ptr_to_indirect(void *ptr)
96680 {
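
Dropping the = { 0, } initializer changes nothing functionally: static and per-CPU storage is zero-initialized by definition, so presumably (assumption) the explicit initializer was removed only so the toolchain treats the variable as uninitialized data. C guarantees the same contents either way:

    #include <stdio.h>

    static int implicit_zero;      /* no initializer: still guaranteed 0 */
    static int explicit_zero = 0;  /* same value, written out explicitly */

    int main(void)
    {
        printf("%d %d\n", implicit_zero, explicit_zero);
        return 0;
    }
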
96681diff --git a/lib/random32.c b/lib/random32.c
96682index fa5da61..35fe9af 100644
96683--- a/lib/random32.c
96684+++ b/lib/random32.c
96685@@ -42,7 +42,7 @@
96686 static void __init prandom_state_selftest(void);
96687 #endif
96688
96689-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
96690+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
96691
96692 /**
96693 * prandom_u32_state - seeded pseudo-random number generator.
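
__latent_entropy asks the latent_entropy gcc plugin to seed the per-CPU PRNG state with entropy the plugin accumulates during the build and at boot. A hedged sketch of how such an attribute macro is typically conditionalized (the guard macro name is an assumption; with a stock compiler the annotation must vanish):

    #ifdef LATENT_ENTROPY_PLUGIN
    #define __latent_entropy __attribute__((latent_entropy))
    #else
    #define __latent_entropy
    #endif

    static unsigned long pool[4] __latent_entropy;  /* illustrative variable */
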
96694diff --git a/lib/rbtree.c b/lib/rbtree.c
96695index 65f4eff..2cfa167 100644
96696--- a/lib/rbtree.c
96697+++ b/lib/rbtree.c
96698@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
96699 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
96700
96701 static const struct rb_augment_callbacks dummy_callbacks = {
96702- dummy_propagate, dummy_copy, dummy_rotate
96703+ .propagate = dummy_propagate,
96704+ .copy = dummy_copy,
96705+ .rotate = dummy_rotate
96706 };
96707
96708 void rb_insert_color(struct rb_node *node, struct rb_root *root)
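
Switching dummy_callbacks to designated initializers binds each function pointer to its field by name, which stays correct even if a structure-layout plugin (or a later edit) reorders the members. A self-contained comparison with illustrative names:

    #include <stdio.h>

    struct rb_callbacks_demo {
        void (*propagate)(void);
        void (*copy)(void);
        void (*rotate)(void);
    };

    static void prop(void) { puts("propagate"); }
    static void cpy(void)  { puts("copy"); }
    static void rot(void)  { puts("rotate"); }

    /* Positional form silently misbinds if the fields are ever reordered: */
    static const struct rb_callbacks_demo positional = { prop, cpy, rot };

    /* Designated form is immune to reordering: */
    static const struct rb_callbacks_demo designated = {
        .propagate = prop,
        .copy      = cpy,
        .rotate    = rot,
    };

    int main(void)
    {
        positional.rotate();
        designated.rotate();
        return 0;
    }
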
96709diff --git a/lib/show_mem.c b/lib/show_mem.c
96710index 0922579..9d7adb9 100644
96711--- a/lib/show_mem.c
96712+++ b/lib/show_mem.c
96713@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
96714 quicklist_total_size());
96715 #endif
96716 #ifdef CONFIG_MEMORY_FAILURE
96717- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
96718+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
96719 #endif
96720 }
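
atomic_long_read_unchecked() belongs to the REFCOUNT hardening's *_unchecked family: ordinary atomic ops trap or saturate on overflow (assumption), while pure statistics counters like the hwpoison page count opt out. A userspace model of the distinction; the helper names are illustrative and __builtin_add_overflow is a GCC/Clang builtin:

    #include <limits.h>
    #include <stdio.h>

    /* "Checked" add that refuses to wrap, versus an "unchecked" add
     * (wrapping via unsigned arithmetic, which is well defined). */
    static long add_checked(long v, long d)
    {
        long r;
        if (__builtin_add_overflow(v, d, &r))
            return v;               /* saturate instead of wrapping */
        return r;
    }

    static long add_unchecked(long v, long d)
    {
        return (long)((unsigned long)v + (unsigned long)d);
    }

    int main(void)
    {
        printf("checked:   %ld\n", add_checked(LONG_MAX, 1));
        printf("unchecked: %ld\n", add_unchecked(LONG_MAX, 1));
        return 0;
    }
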
96721diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
96722index bb2b201..46abaf9 100644
96723--- a/lib/strncpy_from_user.c
96724+++ b/lib/strncpy_from_user.c
96725@@ -21,7 +21,7 @@
96726 */
96727 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
96728 {
96729- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96730+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96731 long res = 0;
96732
96733 /*
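
Making constants static const moves the word-at-a-time lookup values into .rodata: a plain function-local const object is still an automatic variable and may be materialized on the stack on every call. The same change is applied to lib/strnlen_user.c below. A minimal illustration:

    #include <stdio.h>

    static long sum_auto(void)
    {
        const long t[4] = { 1, 2, 3, 4 };        /* may be rebuilt per call */
        return t[0] + t[3];
    }

    static long sum_static(void)
    {
        static const long t[4] = { 1, 2, 3, 4 }; /* one copy in .rodata */
        return t[0] + t[3];
    }

    int main(void)
    {
        printf("%ld %ld\n", sum_auto(), sum_static());
        return 0;
    }
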
96734diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
96735index a28df52..3d55877 100644
96736--- a/lib/strnlen_user.c
96737+++ b/lib/strnlen_user.c
96738@@ -26,7 +26,7 @@
96739 */
96740 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
96741 {
96742- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96743+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96744 long align, res = 0;
96745 unsigned long c;
96746
96747diff --git a/lib/swiotlb.c b/lib/swiotlb.c
96748index 4abda07..b9d3765 100644
96749--- a/lib/swiotlb.c
96750+++ b/lib/swiotlb.c
96751@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
96752
96753 void
96754 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
96755- dma_addr_t dev_addr)
96756+ dma_addr_t dev_addr, struct dma_attrs *attrs)
96757 {
96758 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
96759
96760diff --git a/lib/test_bpf.c b/lib/test_bpf.c
96761deleted file mode 100644
96762index c579e0f..0000000
96763--- a/lib/test_bpf.c
96764+++ /dev/null
96765@@ -1,1929 +0,0 @@
96766-/*
96767- * Testsuite for BPF interpreter and BPF JIT compiler
96768- *
96769- * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
96770- *
96771- * This program is free software; you can redistribute it and/or
96772- * modify it under the terms of version 2 of the GNU General Public
96773- * License as published by the Free Software Foundation.
96774- *
96775- * This program is distributed in the hope that it will be useful, but
96776- * WITHOUT ANY WARRANTY; without even the implied warranty of
96777- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
96778- * General Public License for more details.
96779- */
96780-
96781-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
96782-
96783-#include <linux/init.h>
96784-#include <linux/module.h>
96785-#include <linux/filter.h>
96786-#include <linux/skbuff.h>
96787-#include <linux/netdevice.h>
96788-#include <linux/if_vlan.h>
96789-
96790-/* General test specific settings */
96791-#define MAX_SUBTESTS 3
96792-#define MAX_TESTRUNS 10000
96793-#define MAX_DATA 128
96794-#define MAX_INSNS 512
96795-#define MAX_K 0xffffFFFF
96796-
96797-/* Few constants used to init test 'skb' */
96798-#define SKB_TYPE 3
96799-#define SKB_MARK 0x1234aaaa
96800-#define SKB_HASH 0x1234aaab
96801-#define SKB_QUEUE_MAP 123
96802-#define SKB_VLAN_TCI 0xffff
96803-#define SKB_DEV_IFINDEX 577
96804-#define SKB_DEV_TYPE 588
96805-
96806-/* Redefine REGs to make tests less verbose */
96807-#define R0 BPF_REG_0
96808-#define R1 BPF_REG_1
96809-#define R2 BPF_REG_2
96810-#define R3 BPF_REG_3
96811-#define R4 BPF_REG_4
96812-#define R5 BPF_REG_5
96813-#define R6 BPF_REG_6
96814-#define R7 BPF_REG_7
96815-#define R8 BPF_REG_8
96816-#define R9 BPF_REG_9
96817-#define R10 BPF_REG_10
96818-
96819-/* Flags that can be passed to test cases */
96820-#define FLAG_NO_DATA BIT(0)
96821-#define FLAG_EXPECTED_FAIL BIT(1)
96822-
96823-enum {
96824- CLASSIC = BIT(6), /* Old BPF instructions only. */
96825- INTERNAL = BIT(7), /* Extended instruction set. */
96826-};
96827-
96828-#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
96829-
96830-struct bpf_test {
96831- const char *descr;
96832- union {
96833- struct sock_filter insns[MAX_INSNS];
96834- struct sock_filter_int insns_int[MAX_INSNS];
96835- } u;
96836- __u8 aux;
96837- __u8 data[MAX_DATA];
96838- struct {
96839- int data_size;
96840- __u32 result;
96841- } test[MAX_SUBTESTS];
96842-};
96843-
96844-static struct bpf_test tests[] = {
96845- {
96846- "TAX",
96847- .u.insns = {
96848- BPF_STMT(BPF_LD | BPF_IMM, 1),
96849- BPF_STMT(BPF_MISC | BPF_TAX, 0),
96850- BPF_STMT(BPF_LD | BPF_IMM, 2),
96851- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
96852- BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
96853- BPF_STMT(BPF_MISC | BPF_TAX, 0),
96854- BPF_STMT(BPF_LD | BPF_LEN, 0),
96855- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
96856- BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
96857- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
96858- BPF_STMT(BPF_RET | BPF_A, 0)
96859- },
96860- CLASSIC,
96861- { 10, 20, 30, 40, 50 },
96862- { { 2, 10 }, { 3, 20 }, { 4, 30 } },
96863- },
96864- {
96865- "TXA",
96866- .u.insns = {
96867- BPF_STMT(BPF_LDX | BPF_LEN, 0),
96868- BPF_STMT(BPF_MISC | BPF_TXA, 0),
96869- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
96870- BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
96871- },
96872- CLASSIC,
96873- { 10, 20, 30, 40, 50 },
96874- { { 1, 2 }, { 3, 6 }, { 4, 8 } },
96875- },
96876- {
96877- "ADD_SUB_MUL_K",
96878- .u.insns = {
96879- BPF_STMT(BPF_LD | BPF_IMM, 1),
96880- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
96881- BPF_STMT(BPF_LDX | BPF_IMM, 3),
96882- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
96883- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
96884- BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
96885- BPF_STMT(BPF_RET | BPF_A, 0)
96886- },
96887- CLASSIC | FLAG_NO_DATA,
96888- { },
96889- { { 0, 0xfffffffd } }
96890- },
96891- {
96892- "DIV_KX",
96893- .u.insns = {
96894- BPF_STMT(BPF_LD | BPF_IMM, 8),
96895- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
96896- BPF_STMT(BPF_MISC | BPF_TAX, 0),
96897- BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
96898- BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
96899- BPF_STMT(BPF_MISC | BPF_TAX, 0),
96900- BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
96901- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
96902- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
96903- BPF_STMT(BPF_RET | BPF_A, 0)
96904- },
96905- CLASSIC | FLAG_NO_DATA,
96906- { },
96907- { { 0, 0x40000001 } }
96908- },
96909- {
96910- "AND_OR_LSH_K",
96911- .u.insns = {
96912- BPF_STMT(BPF_LD | BPF_IMM, 0xff),
96913- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
96914- BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
96915- BPF_STMT(BPF_MISC | BPF_TAX, 0),
96916- BPF_STMT(BPF_LD | BPF_IMM, 0xf),
96917- BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
96918- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
96919- BPF_STMT(BPF_RET | BPF_A, 0)
96920- },
96921- CLASSIC | FLAG_NO_DATA,
96922- { },
96923- { { 0, 0x800000ff }, { 1, 0x800000ff } },
96924- },
96925- {
96926- "LD_IMM_0",
96927- .u.insns = {
96928- BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
96929- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
96930- BPF_STMT(BPF_RET | BPF_K, 0),
96931- BPF_STMT(BPF_RET | BPF_K, 1),
96932- },
96933- CLASSIC,
96934- { },
96935- { { 1, 1 } },
96936- },
96937- {
96938- "LD_IND",
96939- .u.insns = {
96940- BPF_STMT(BPF_LDX | BPF_LEN, 0),
96941- BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
96942- BPF_STMT(BPF_RET | BPF_K, 1)
96943- },
96944- CLASSIC,
96945- { },
96946- { { 1, 0 }, { 10, 0 }, { 60, 0 } },
96947- },
96948- {
96949- "LD_ABS",
96950- .u.insns = {
96951- BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
96952- BPF_STMT(BPF_RET | BPF_K, 1)
96953- },
96954- CLASSIC,
96955- { },
96956- { { 1, 0 }, { 10, 0 }, { 60, 0 } },
96957- },
96958- {
96959- "LD_ABS_LL",
96960- .u.insns = {
96961- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
96962- BPF_STMT(BPF_MISC | BPF_TAX, 0),
96963- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
96964- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
96965- BPF_STMT(BPF_RET | BPF_A, 0)
96966- },
96967- CLASSIC,
96968- { 1, 2, 3 },
96969- { { 1, 0 }, { 2, 3 } },
96970- },
96971- {
96972- "LD_IND_LL",
96973- .u.insns = {
96974- BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
96975- BPF_STMT(BPF_LDX | BPF_LEN, 0),
96976- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
96977- BPF_STMT(BPF_MISC | BPF_TAX, 0),
96978- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
96979- BPF_STMT(BPF_RET | BPF_A, 0)
96980- },
96981- CLASSIC,
96982- { 1, 2, 3, 0xff },
96983- { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
96984- },
96985- {
96986- "LD_ABS_NET",
96987- .u.insns = {
96988- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
96989- BPF_STMT(BPF_MISC | BPF_TAX, 0),
96990- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
96991- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
96992- BPF_STMT(BPF_RET | BPF_A, 0)
96993- },
96994- CLASSIC,
96995- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
96996- { { 15, 0 }, { 16, 3 } },
96997- },
96998- {
96999- "LD_IND_NET",
97000- .u.insns = {
97001- BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
97002- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97003- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97004- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97005- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
97006- BPF_STMT(BPF_RET | BPF_A, 0)
97007- },
97008- CLASSIC,
97009- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
97010- { { 14, 0 }, { 15, 1 }, { 17, 3 } },
97011- },
97012- {
97013- "LD_PKTTYPE",
97014- .u.insns = {
97015- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97016- SKF_AD_OFF + SKF_AD_PKTTYPE),
97017- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97018- BPF_STMT(BPF_RET | BPF_K, 1),
97019- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97020- SKF_AD_OFF + SKF_AD_PKTTYPE),
97021- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97022- BPF_STMT(BPF_RET | BPF_K, 1),
97023- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97024- SKF_AD_OFF + SKF_AD_PKTTYPE),
97025- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97026- BPF_STMT(BPF_RET | BPF_K, 1),
97027- BPF_STMT(BPF_RET | BPF_A, 0)
97028- },
97029- CLASSIC,
97030- { },
97031- { { 1, 3 }, { 10, 3 } },
97032- },
97033- {
97034- "LD_MARK",
97035- .u.insns = {
97036- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97037- SKF_AD_OFF + SKF_AD_MARK),
97038- BPF_STMT(BPF_RET | BPF_A, 0)
97039- },
97040- CLASSIC,
97041- { },
97042- { { 1, SKB_MARK}, { 10, SKB_MARK} },
97043- },
97044- {
97045- "LD_RXHASH",
97046- .u.insns = {
97047- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97048- SKF_AD_OFF + SKF_AD_RXHASH),
97049- BPF_STMT(BPF_RET | BPF_A, 0)
97050- },
97051- CLASSIC,
97052- { },
97053- { { 1, SKB_HASH}, { 10, SKB_HASH} },
97054- },
97055- {
97056- "LD_QUEUE",
97057- .u.insns = {
97058- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97059- SKF_AD_OFF + SKF_AD_QUEUE),
97060- BPF_STMT(BPF_RET | BPF_A, 0)
97061- },
97062- CLASSIC,
97063- { },
97064- { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
97065- },
97066- {
97067- "LD_PROTOCOL",
97068- .u.insns = {
97069- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
97070- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
97071- BPF_STMT(BPF_RET | BPF_K, 0),
97072- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97073- SKF_AD_OFF + SKF_AD_PROTOCOL),
97074- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97075- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97076- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
97077- BPF_STMT(BPF_RET | BPF_K, 0),
97078- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97079- BPF_STMT(BPF_RET | BPF_A, 0)
97080- },
97081- CLASSIC,
97082- { 10, 20, 30 },
97083- { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
97084- },
97085- {
97086- "LD_VLAN_TAG",
97087- .u.insns = {
97088- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97089- SKF_AD_OFF + SKF_AD_VLAN_TAG),
97090- BPF_STMT(BPF_RET | BPF_A, 0)
97091- },
97092- CLASSIC,
97093- { },
97094- {
97095- { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
97096- { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
97097- },
97098- },
97099- {
97100- "LD_VLAN_TAG_PRESENT",
97101- .u.insns = {
97102- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97103- SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
97104- BPF_STMT(BPF_RET | BPF_A, 0)
97105- },
97106- CLASSIC,
97107- { },
97108- {
97109- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
97110- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
97111- },
97112- },
97113- {
97114- "LD_IFINDEX",
97115- .u.insns = {
97116- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97117- SKF_AD_OFF + SKF_AD_IFINDEX),
97118- BPF_STMT(BPF_RET | BPF_A, 0)
97119- },
97120- CLASSIC,
97121- { },
97122- { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
97123- },
97124- {
97125- "LD_HATYPE",
97126- .u.insns = {
97127- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97128- SKF_AD_OFF + SKF_AD_HATYPE),
97129- BPF_STMT(BPF_RET | BPF_A, 0)
97130- },
97131- CLASSIC,
97132- { },
97133- { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
97134- },
97135- {
97136- "LD_CPU",
97137- .u.insns = {
97138- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97139- SKF_AD_OFF + SKF_AD_CPU),
97140- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97141- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97142- SKF_AD_OFF + SKF_AD_CPU),
97143- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
97144- BPF_STMT(BPF_RET | BPF_A, 0)
97145- },
97146- CLASSIC,
97147- { },
97148- { { 1, 0 }, { 10, 0 } },
97149- },
97150- {
97151- "LD_NLATTR",
97152- .u.insns = {
97153- BPF_STMT(BPF_LDX | BPF_IMM, 2),
97154- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97155- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97156- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97157- SKF_AD_OFF + SKF_AD_NLATTR),
97158- BPF_STMT(BPF_RET | BPF_A, 0)
97159- },
97160- CLASSIC,
97161-#ifdef __BIG_ENDIAN
97162- { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
97163-#else
97164- { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
97165-#endif
97166- { { 4, 0 }, { 20, 6 } },
97167- },
97168- {
97169- "LD_NLATTR_NEST",
97170- .u.insns = {
97171- BPF_STMT(BPF_LD | BPF_IMM, 2),
97172- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97173- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97174- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97175- BPF_STMT(BPF_LD | BPF_IMM, 2),
97176- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97177- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97178- BPF_STMT(BPF_LD | BPF_IMM, 2),
97179- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97180- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97181- BPF_STMT(BPF_LD | BPF_IMM, 2),
97182- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97183- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97184- BPF_STMT(BPF_LD | BPF_IMM, 2),
97185- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97186- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97187- BPF_STMT(BPF_LD | BPF_IMM, 2),
97188- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97189- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97190- BPF_STMT(BPF_LD | BPF_IMM, 2),
97191- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97192- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97193- BPF_STMT(BPF_LD | BPF_IMM, 2),
97194- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97195- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97196- BPF_STMT(BPF_RET | BPF_A, 0)
97197- },
97198- CLASSIC,
97199-#ifdef __BIG_ENDIAN
97200- { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
97201-#else
97202- { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
97203-#endif
97204- { { 4, 0 }, { 20, 10 } },
97205- },
97206- {
97207- "LD_PAYLOAD_OFF",
97208- .u.insns = {
97209- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97210- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97211- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97212- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97213- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97214- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97215- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97216- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97217- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97218- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97219- BPF_STMT(BPF_RET | BPF_A, 0)
97220- },
97221- CLASSIC,
97222- /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
97223- * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
97224- * id 9737, seq 1, length 64
97225- */
97226- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
97227- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
97228- 0x08, 0x00,
97229- 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
97230- 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
97231- { { 30, 0 }, { 100, 42 } },
97232- },
97233- {
97234- "LD_ANC_XOR",
97235- .u.insns = {
97236- BPF_STMT(BPF_LD | BPF_IMM, 10),
97237- BPF_STMT(BPF_LDX | BPF_IMM, 300),
97238- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97239- SKF_AD_OFF + SKF_AD_ALU_XOR_X),
97240- BPF_STMT(BPF_RET | BPF_A, 0)
97241- },
97242- CLASSIC,
97243- { },
97244- { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
97245- },
97246- {
97247- "SPILL_FILL",
97248- .u.insns = {
97249- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97250- BPF_STMT(BPF_LD | BPF_IMM, 2),
97251- BPF_STMT(BPF_ALU | BPF_RSH, 1),
97252- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97253- BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
97254- BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
97255- BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
97256- BPF_STMT(BPF_STX, 15), /* M3 = len */
97257- BPF_STMT(BPF_LDX | BPF_MEM, 1),
97258- BPF_STMT(BPF_LD | BPF_MEM, 2),
97259- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97260- BPF_STMT(BPF_LDX | BPF_MEM, 15),
97261- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97262- BPF_STMT(BPF_RET | BPF_A, 0)
97263- },
97264- CLASSIC,
97265- { },
97266- { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
97267- },
97268- {
97269- "JEQ",
97270- .u.insns = {
97271- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97272- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97273- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
97274- BPF_STMT(BPF_RET | BPF_K, 1),
97275- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97276- },
97277- CLASSIC,
97278- { 3, 3, 3, 3, 3 },
97279- { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
97280- },
97281- {
97282- "JGT",
97283- .u.insns = {
97284- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97285- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97286- BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
97287- BPF_STMT(BPF_RET | BPF_K, 1),
97288- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97289- },
97290- CLASSIC,
97291- { 4, 4, 4, 3, 3 },
97292- { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
97293- },
97294- {
97295- "JGE",
97296- .u.insns = {
97297- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97298- BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
97299- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
97300- BPF_STMT(BPF_RET | BPF_K, 10),
97301- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
97302- BPF_STMT(BPF_RET | BPF_K, 20),
97303- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
97304- BPF_STMT(BPF_RET | BPF_K, 30),
97305- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
97306- BPF_STMT(BPF_RET | BPF_K, 40),
97307- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97308- },
97309- CLASSIC,
97310- { 1, 2, 3, 4, 5 },
97311- { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
97312- },
97313- {
97314- "JSET",
97315- .u.insns = {
97316- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97317- BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
97318- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97319- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97320- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97321- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97322- BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
97323- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97324- BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
97325- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
97326- BPF_STMT(BPF_RET | BPF_K, 10),
97327- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
97328- BPF_STMT(BPF_RET | BPF_K, 20),
97329- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97330- BPF_STMT(BPF_RET | BPF_K, 30),
97331- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97332- BPF_STMT(BPF_RET | BPF_K, 30),
97333- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97334- BPF_STMT(BPF_RET | BPF_K, 30),
97335- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97336- BPF_STMT(BPF_RET | BPF_K, 30),
97337- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97338- BPF_STMT(BPF_RET | BPF_K, 30),
97339- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97340- },
97341- CLASSIC,
97342- { 0, 0xAA, 0x55, 1 },
97343- { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
97344- },
97345- {
97346- "tcpdump port 22",
97347- .u.insns = {
97348- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
97349- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
97350- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
97351- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
97352- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
97353- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
97354- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
97355- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
97356- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
97357- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
97358- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
97359- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
97360- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
97361- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
97362- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
97363- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
97364- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
97365- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
97366- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
97367- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
97368- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
97369- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
97370- BPF_STMT(BPF_RET | BPF_K, 0xffff),
97371- BPF_STMT(BPF_RET | BPF_K, 0),
97372- },
97373- CLASSIC,
97374- /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
97375- * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
97376- * seq 1305692979:1305693027, ack 3650467037, win 65535,
97377- * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
97378- */
97379- { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
97380- 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
97381- 0x08, 0x00,
97382- 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
97383- 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
97384- 0x0a, 0x01, 0x01, 0x95, /* ip src */
97385- 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
97386- 0xc2, 0x24,
97387- 0x00, 0x16 /* dst port */ },
97388- { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
97389- },
97390- {
97391- "tcpdump complex",
97392- .u.insns = {
97393- /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
97394- * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
97395- * (len > 115 or len < 30000000000)' -d
97396- */
97397- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
97398- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
97399- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
97400- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
97401- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
97402- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
97403- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
97404- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
97405- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
97406- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
97407- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
97408- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
97409- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
97410- BPF_STMT(BPF_ST, 1),
97411- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
97412- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
97413- BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
97414- BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
97415- BPF_STMT(BPF_LD | BPF_MEM, 1),
97416- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
97417- BPF_STMT(BPF_ST, 5),
97418- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
97419- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
97420- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
97421- BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
97422- BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
97423- BPF_STMT(BPF_LD | BPF_MEM, 5),
97424- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
97425- BPF_STMT(BPF_LD | BPF_LEN, 0),
97426- BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
97427- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
97428- BPF_STMT(BPF_RET | BPF_K, 0xffff),
97429- BPF_STMT(BPF_RET | BPF_K, 0),
97430- },
97431- CLASSIC,
97432- { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
97433- 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
97434- 0x08, 0x00,
97435- 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
97436- 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
97437- 0x0a, 0x01, 0x01, 0x95, /* ip src */
97438- 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
97439- 0xc2, 0x24,
97440- 0x00, 0x16 /* dst port */ },
97441- { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
97442- },
97443- {
97444- "RET_A",
97445- .u.insns = {
97446- /* check that unitialized X and A contain zeros */
97447- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97448- BPF_STMT(BPF_RET | BPF_A, 0)
97449- },
97450- CLASSIC,
97451- { },
97452- { {1, 0}, {2, 0} },
97453- },
97454- {
97455- "INT: ADD trivial",
97456- .u.insns_int = {
97457- BPF_ALU64_IMM(BPF_MOV, R1, 1),
97458- BPF_ALU64_IMM(BPF_ADD, R1, 2),
97459- BPF_ALU64_IMM(BPF_MOV, R2, 3),
97460- BPF_ALU64_REG(BPF_SUB, R1, R2),
97461- BPF_ALU64_IMM(BPF_ADD, R1, -1),
97462- BPF_ALU64_IMM(BPF_MUL, R1, 3),
97463- BPF_ALU64_REG(BPF_MOV, R0, R1),
97464- BPF_EXIT_INSN(),
97465- },
97466- INTERNAL,
97467- { },
97468- { { 0, 0xfffffffd } }
97469- },
97470- {
97471- "INT: MUL_X",
97472- .u.insns_int = {
97473- BPF_ALU64_IMM(BPF_MOV, R0, -1),
97474- BPF_ALU64_IMM(BPF_MOV, R1, -1),
97475- BPF_ALU64_IMM(BPF_MOV, R2, 3),
97476- BPF_ALU64_REG(BPF_MUL, R1, R2),
97477- BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
97478- BPF_EXIT_INSN(),
97479- BPF_ALU64_IMM(BPF_MOV, R0, 1),
97480- BPF_EXIT_INSN(),
97481- },
97482- INTERNAL,
97483- { },
97484- { { 0, 1 } }
97485- },
97486- {
97487- "INT: MUL_X2",
97488- .u.insns_int = {
97489- BPF_ALU32_IMM(BPF_MOV, R0, -1),
97490- BPF_ALU32_IMM(BPF_MOV, R1, -1),
97491- BPF_ALU32_IMM(BPF_MOV, R2, 3),
97492- BPF_ALU64_REG(BPF_MUL, R1, R2),
97493- BPF_ALU64_IMM(BPF_RSH, R1, 8),
97494- BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
97495- BPF_EXIT_INSN(),
97496- BPF_ALU32_IMM(BPF_MOV, R0, 1),
97497- BPF_EXIT_INSN(),
97498- },
97499- INTERNAL,
97500- { },
97501- { { 0, 1 } }
97502- },
97503- {
97504- "INT: MUL32_X",
97505- .u.insns_int = {
97506- BPF_ALU32_IMM(BPF_MOV, R0, -1),
97507- BPF_ALU64_IMM(BPF_MOV, R1, -1),
97508- BPF_ALU32_IMM(BPF_MOV, R2, 3),
97509- BPF_ALU32_REG(BPF_MUL, R1, R2),
97510- BPF_ALU64_IMM(BPF_RSH, R1, 8),
97511- BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
97512- BPF_EXIT_INSN(),
97513- BPF_ALU32_IMM(BPF_MOV, R0, 1),
97514- BPF_EXIT_INSN(),
97515- },
97516- INTERNAL,
97517- { },
97518- { { 0, 1 } }
97519- },
97520- {
97521- /* Have to test all register combinations, since
97522- * JITing of different registers will produce
97523- * different asm code.
97524- */
97525- "INT: ADD 64-bit",
97526- .u.insns_int = {
97527- BPF_ALU64_IMM(BPF_MOV, R0, 0),
97528- BPF_ALU64_IMM(BPF_MOV, R1, 1),
97529- BPF_ALU64_IMM(BPF_MOV, R2, 2),
97530- BPF_ALU64_IMM(BPF_MOV, R3, 3),
97531- BPF_ALU64_IMM(BPF_MOV, R4, 4),
97532- BPF_ALU64_IMM(BPF_MOV, R5, 5),
97533- BPF_ALU64_IMM(BPF_MOV, R6, 6),
97534- BPF_ALU64_IMM(BPF_MOV, R7, 7),
97535- BPF_ALU64_IMM(BPF_MOV, R8, 8),
97536- BPF_ALU64_IMM(BPF_MOV, R9, 9),
97537- BPF_ALU64_IMM(BPF_ADD, R0, 20),
97538- BPF_ALU64_IMM(BPF_ADD, R1, 20),
97539- BPF_ALU64_IMM(BPF_ADD, R2, 20),
97540- BPF_ALU64_IMM(BPF_ADD, R3, 20),
97541- BPF_ALU64_IMM(BPF_ADD, R4, 20),
97542- BPF_ALU64_IMM(BPF_ADD, R5, 20),
97543- BPF_ALU64_IMM(BPF_ADD, R6, 20),
97544- BPF_ALU64_IMM(BPF_ADD, R7, 20),
97545- BPF_ALU64_IMM(BPF_ADD, R8, 20),
97546- BPF_ALU64_IMM(BPF_ADD, R9, 20),
97547- BPF_ALU64_IMM(BPF_SUB, R0, 10),
97548- BPF_ALU64_IMM(BPF_SUB, R1, 10),
97549- BPF_ALU64_IMM(BPF_SUB, R2, 10),
97550- BPF_ALU64_IMM(BPF_SUB, R3, 10),
97551- BPF_ALU64_IMM(BPF_SUB, R4, 10),
97552- BPF_ALU64_IMM(BPF_SUB, R5, 10),
97553- BPF_ALU64_IMM(BPF_SUB, R6, 10),
97554- BPF_ALU64_IMM(BPF_SUB, R7, 10),
97555- BPF_ALU64_IMM(BPF_SUB, R8, 10),
97556- BPF_ALU64_IMM(BPF_SUB, R9, 10),
97557- BPF_ALU64_REG(BPF_ADD, R0, R0),
97558- BPF_ALU64_REG(BPF_ADD, R0, R1),
97559- BPF_ALU64_REG(BPF_ADD, R0, R2),
97560- BPF_ALU64_REG(BPF_ADD, R0, R3),
97561- BPF_ALU64_REG(BPF_ADD, R0, R4),
97562- BPF_ALU64_REG(BPF_ADD, R0, R5),
97563- BPF_ALU64_REG(BPF_ADD, R0, R6),
97564- BPF_ALU64_REG(BPF_ADD, R0, R7),
97565- BPF_ALU64_REG(BPF_ADD, R0, R8),
97566- BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
97567- BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
97568- BPF_EXIT_INSN(),
97569- BPF_ALU64_REG(BPF_ADD, R1, R0),
97570- BPF_ALU64_REG(BPF_ADD, R1, R1),
97571- BPF_ALU64_REG(BPF_ADD, R1, R2),
97572- BPF_ALU64_REG(BPF_ADD, R1, R3),
97573- BPF_ALU64_REG(BPF_ADD, R1, R4),
97574- BPF_ALU64_REG(BPF_ADD, R1, R5),
97575- BPF_ALU64_REG(BPF_ADD, R1, R6),
97576- BPF_ALU64_REG(BPF_ADD, R1, R7),
97577- BPF_ALU64_REG(BPF_ADD, R1, R8),
97578- BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
97579- BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
97580- BPF_EXIT_INSN(),
97581- BPF_ALU64_REG(BPF_ADD, R2, R0),
97582- BPF_ALU64_REG(BPF_ADD, R2, R1),
97583- BPF_ALU64_REG(BPF_ADD, R2, R2),
97584- BPF_ALU64_REG(BPF_ADD, R2, R3),
97585- BPF_ALU64_REG(BPF_ADD, R2, R4),
97586- BPF_ALU64_REG(BPF_ADD, R2, R5),
97587- BPF_ALU64_REG(BPF_ADD, R2, R6),
97588- BPF_ALU64_REG(BPF_ADD, R2, R7),
97589- BPF_ALU64_REG(BPF_ADD, R2, R8),
97590- BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
97591- BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
97592- BPF_EXIT_INSN(),
97593- BPF_ALU64_REG(BPF_ADD, R3, R0),
97594- BPF_ALU64_REG(BPF_ADD, R3, R1),
97595- BPF_ALU64_REG(BPF_ADD, R3, R2),
97596- BPF_ALU64_REG(BPF_ADD, R3, R3),
97597- BPF_ALU64_REG(BPF_ADD, R3, R4),
97598- BPF_ALU64_REG(BPF_ADD, R3, R5),
97599- BPF_ALU64_REG(BPF_ADD, R3, R6),
97600- BPF_ALU64_REG(BPF_ADD, R3, R7),
97601- BPF_ALU64_REG(BPF_ADD, R3, R8),
97602- BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
97603- BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
97604- BPF_EXIT_INSN(),
97605- BPF_ALU64_REG(BPF_ADD, R4, R0),
97606- BPF_ALU64_REG(BPF_ADD, R4, R1),
97607- BPF_ALU64_REG(BPF_ADD, R4, R2),
97608- BPF_ALU64_REG(BPF_ADD, R4, R3),
97609- BPF_ALU64_REG(BPF_ADD, R4, R4),
97610- BPF_ALU64_REG(BPF_ADD, R4, R5),
97611- BPF_ALU64_REG(BPF_ADD, R4, R6),
97612- BPF_ALU64_REG(BPF_ADD, R4, R7),
97613- BPF_ALU64_REG(BPF_ADD, R4, R8),
97614- BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
97615- BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
97616- BPF_EXIT_INSN(),
97617- BPF_ALU64_REG(BPF_ADD, R5, R0),
97618- BPF_ALU64_REG(BPF_ADD, R5, R1),
97619- BPF_ALU64_REG(BPF_ADD, R5, R2),
97620- BPF_ALU64_REG(BPF_ADD, R5, R3),
97621- BPF_ALU64_REG(BPF_ADD, R5, R4),
97622- BPF_ALU64_REG(BPF_ADD, R5, R5),
97623- BPF_ALU64_REG(BPF_ADD, R5, R6),
97624- BPF_ALU64_REG(BPF_ADD, R5, R7),
97625- BPF_ALU64_REG(BPF_ADD, R5, R8),
97626- BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
97627- BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
97628- BPF_EXIT_INSN(),
97629- BPF_ALU64_REG(BPF_ADD, R6, R0),
97630- BPF_ALU64_REG(BPF_ADD, R6, R1),
97631- BPF_ALU64_REG(BPF_ADD, R6, R2),
97632- BPF_ALU64_REG(BPF_ADD, R6, R3),
97633- BPF_ALU64_REG(BPF_ADD, R6, R4),
97634- BPF_ALU64_REG(BPF_ADD, R6, R5),
97635- BPF_ALU64_REG(BPF_ADD, R6, R6),
97636- BPF_ALU64_REG(BPF_ADD, R6, R7),
97637- BPF_ALU64_REG(BPF_ADD, R6, R8),
97638- BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
97639- BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
97640- BPF_EXIT_INSN(),
97641- BPF_ALU64_REG(BPF_ADD, R7, R0),
97642- BPF_ALU64_REG(BPF_ADD, R7, R1),
97643- BPF_ALU64_REG(BPF_ADD, R7, R2),
97644- BPF_ALU64_REG(BPF_ADD, R7, R3),
97645- BPF_ALU64_REG(BPF_ADD, R7, R4),
97646- BPF_ALU64_REG(BPF_ADD, R7, R5),
97647- BPF_ALU64_REG(BPF_ADD, R7, R6),
97648- BPF_ALU64_REG(BPF_ADD, R7, R7),
97649- BPF_ALU64_REG(BPF_ADD, R7, R8),
97650- BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
97651- BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
97652- BPF_EXIT_INSN(),
97653- BPF_ALU64_REG(BPF_ADD, R8, R0),
97654- BPF_ALU64_REG(BPF_ADD, R8, R1),
97655- BPF_ALU64_REG(BPF_ADD, R8, R2),
97656- BPF_ALU64_REG(BPF_ADD, R8, R3),
97657- BPF_ALU64_REG(BPF_ADD, R8, R4),
97658- BPF_ALU64_REG(BPF_ADD, R8, R5),
97659- BPF_ALU64_REG(BPF_ADD, R8, R6),
97660- BPF_ALU64_REG(BPF_ADD, R8, R7),
97661- BPF_ALU64_REG(BPF_ADD, R8, R8),
97662- BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
97663- BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
97664- BPF_EXIT_INSN(),
97665- BPF_ALU64_REG(BPF_ADD, R9, R0),
97666- BPF_ALU64_REG(BPF_ADD, R9, R1),
97667- BPF_ALU64_REG(BPF_ADD, R9, R2),
97668- BPF_ALU64_REG(BPF_ADD, R9, R3),
97669- BPF_ALU64_REG(BPF_ADD, R9, R4),
97670- BPF_ALU64_REG(BPF_ADD, R9, R5),
97671- BPF_ALU64_REG(BPF_ADD, R9, R6),
97672- BPF_ALU64_REG(BPF_ADD, R9, R7),
97673- BPF_ALU64_REG(BPF_ADD, R9, R8),
97674- BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
97675- BPF_ALU64_REG(BPF_MOV, R0, R9),
97676- BPF_EXIT_INSN(),
97677- },
97678- INTERNAL,
97679- { },
97680- { { 0, 2957380 } }
97681- },
97682- {
97683- "INT: ADD 32-bit",
97684- .u.insns_int = {
97685- BPF_ALU32_IMM(BPF_MOV, R0, 20),
97686- BPF_ALU32_IMM(BPF_MOV, R1, 1),
97687- BPF_ALU32_IMM(BPF_MOV, R2, 2),
97688- BPF_ALU32_IMM(BPF_MOV, R3, 3),
97689- BPF_ALU32_IMM(BPF_MOV, R4, 4),
97690- BPF_ALU32_IMM(BPF_MOV, R5, 5),
97691- BPF_ALU32_IMM(BPF_MOV, R6, 6),
97692- BPF_ALU32_IMM(BPF_MOV, R7, 7),
97693- BPF_ALU32_IMM(BPF_MOV, R8, 8),
97694- BPF_ALU32_IMM(BPF_MOV, R9, 9),
97695- BPF_ALU64_IMM(BPF_ADD, R1, 10),
97696- BPF_ALU64_IMM(BPF_ADD, R2, 10),
97697- BPF_ALU64_IMM(BPF_ADD, R3, 10),
97698- BPF_ALU64_IMM(BPF_ADD, R4, 10),
97699- BPF_ALU64_IMM(BPF_ADD, R5, 10),
97700- BPF_ALU64_IMM(BPF_ADD, R6, 10),
97701- BPF_ALU64_IMM(BPF_ADD, R7, 10),
97702- BPF_ALU64_IMM(BPF_ADD, R8, 10),
97703- BPF_ALU64_IMM(BPF_ADD, R9, 10),
97704- BPF_ALU32_REG(BPF_ADD, R0, R1),
97705- BPF_ALU32_REG(BPF_ADD, R0, R2),
97706- BPF_ALU32_REG(BPF_ADD, R0, R3),
97707- BPF_ALU32_REG(BPF_ADD, R0, R4),
97708- BPF_ALU32_REG(BPF_ADD, R0, R5),
97709- BPF_ALU32_REG(BPF_ADD, R0, R6),
97710- BPF_ALU32_REG(BPF_ADD, R0, R7),
97711- BPF_ALU32_REG(BPF_ADD, R0, R8),
97712- BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
97713- BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
97714- BPF_EXIT_INSN(),
97715- BPF_ALU32_REG(BPF_ADD, R1, R0),
97716- BPF_ALU32_REG(BPF_ADD, R1, R1),
97717- BPF_ALU32_REG(BPF_ADD, R1, R2),
97718- BPF_ALU32_REG(BPF_ADD, R1, R3),
97719- BPF_ALU32_REG(BPF_ADD, R1, R4),
97720- BPF_ALU32_REG(BPF_ADD, R1, R5),
97721- BPF_ALU32_REG(BPF_ADD, R1, R6),
97722- BPF_ALU32_REG(BPF_ADD, R1, R7),
97723- BPF_ALU32_REG(BPF_ADD, R1, R8),
97724- BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
97725- BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
97726- BPF_EXIT_INSN(),
97727- BPF_ALU32_REG(BPF_ADD, R2, R0),
97728- BPF_ALU32_REG(BPF_ADD, R2, R1),
97729- BPF_ALU32_REG(BPF_ADD, R2, R2),
97730- BPF_ALU32_REG(BPF_ADD, R2, R3),
97731- BPF_ALU32_REG(BPF_ADD, R2, R4),
97732- BPF_ALU32_REG(BPF_ADD, R2, R5),
97733- BPF_ALU32_REG(BPF_ADD, R2, R6),
97734- BPF_ALU32_REG(BPF_ADD, R2, R7),
97735- BPF_ALU32_REG(BPF_ADD, R2, R8),
97736- BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
97737- BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
97738- BPF_EXIT_INSN(),
97739- BPF_ALU32_REG(BPF_ADD, R3, R0),
97740- BPF_ALU32_REG(BPF_ADD, R3, R1),
97741- BPF_ALU32_REG(BPF_ADD, R3, R2),
97742- BPF_ALU32_REG(BPF_ADD, R3, R3),
97743- BPF_ALU32_REG(BPF_ADD, R3, R4),
97744- BPF_ALU32_REG(BPF_ADD, R3, R5),
97745- BPF_ALU32_REG(BPF_ADD, R3, R6),
97746- BPF_ALU32_REG(BPF_ADD, R3, R7),
97747- BPF_ALU32_REG(BPF_ADD, R3, R8),
97748- BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
97749- BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
97750- BPF_EXIT_INSN(),
97751- BPF_ALU32_REG(BPF_ADD, R4, R0),
97752- BPF_ALU32_REG(BPF_ADD, R4, R1),
97753- BPF_ALU32_REG(BPF_ADD, R4, R2),
97754- BPF_ALU32_REG(BPF_ADD, R4, R3),
97755- BPF_ALU32_REG(BPF_ADD, R4, R4),
97756- BPF_ALU32_REG(BPF_ADD, R4, R5),
97757- BPF_ALU32_REG(BPF_ADD, R4, R6),
97758- BPF_ALU32_REG(BPF_ADD, R4, R7),
97759- BPF_ALU32_REG(BPF_ADD, R4, R8),
97760- BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
97761- BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
97762- BPF_EXIT_INSN(),
97763- BPF_ALU32_REG(BPF_ADD, R5, R0),
97764- BPF_ALU32_REG(BPF_ADD, R5, R1),
97765- BPF_ALU32_REG(BPF_ADD, R5, R2),
97766- BPF_ALU32_REG(BPF_ADD, R5, R3),
97767- BPF_ALU32_REG(BPF_ADD, R5, R4),
97768- BPF_ALU32_REG(BPF_ADD, R5, R5),
97769- BPF_ALU32_REG(BPF_ADD, R5, R6),
97770- BPF_ALU32_REG(BPF_ADD, R5, R7),
97771- BPF_ALU32_REG(BPF_ADD, R5, R8),
97772- BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
97773- BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
97774- BPF_EXIT_INSN(),
97775- BPF_ALU32_REG(BPF_ADD, R6, R0),
97776- BPF_ALU32_REG(BPF_ADD, R6, R1),
97777- BPF_ALU32_REG(BPF_ADD, R6, R2),
97778- BPF_ALU32_REG(BPF_ADD, R6, R3),
97779- BPF_ALU32_REG(BPF_ADD, R6, R4),
97780- BPF_ALU32_REG(BPF_ADD, R6, R5),
97781- BPF_ALU32_REG(BPF_ADD, R6, R6),
97782- BPF_ALU32_REG(BPF_ADD, R6, R7),
97783- BPF_ALU32_REG(BPF_ADD, R6, R8),
97784- BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
97785- BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
97786- BPF_EXIT_INSN(),
97787- BPF_ALU32_REG(BPF_ADD, R7, R0),
97788- BPF_ALU32_REG(BPF_ADD, R7, R1),
97789- BPF_ALU32_REG(BPF_ADD, R7, R2),
97790- BPF_ALU32_REG(BPF_ADD, R7, R3),
97791- BPF_ALU32_REG(BPF_ADD, R7, R4),
97792- BPF_ALU32_REG(BPF_ADD, R7, R5),
97793- BPF_ALU32_REG(BPF_ADD, R7, R6),
97794- BPF_ALU32_REG(BPF_ADD, R7, R7),
97795- BPF_ALU32_REG(BPF_ADD, R7, R8),
97796- BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
97797- BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
97798- BPF_EXIT_INSN(),
97799- BPF_ALU32_REG(BPF_ADD, R8, R0),
97800- BPF_ALU32_REG(BPF_ADD, R8, R1),
97801- BPF_ALU32_REG(BPF_ADD, R8, R2),
97802- BPF_ALU32_REG(BPF_ADD, R8, R3),
97803- BPF_ALU32_REG(BPF_ADD, R8, R4),
97804- BPF_ALU32_REG(BPF_ADD, R8, R5),
97805- BPF_ALU32_REG(BPF_ADD, R8, R6),
97806- BPF_ALU32_REG(BPF_ADD, R8, R7),
97807- BPF_ALU32_REG(BPF_ADD, R8, R8),
97808- BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
97809- BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
97810- BPF_EXIT_INSN(),
97811- BPF_ALU32_REG(BPF_ADD, R9, R0),
97812- BPF_ALU32_REG(BPF_ADD, R9, R1),
97813- BPF_ALU32_REG(BPF_ADD, R9, R2),
97814- BPF_ALU32_REG(BPF_ADD, R9, R3),
97815- BPF_ALU32_REG(BPF_ADD, R9, R4),
97816- BPF_ALU32_REG(BPF_ADD, R9, R5),
97817- BPF_ALU32_REG(BPF_ADD, R9, R6),
97818- BPF_ALU32_REG(BPF_ADD, R9, R7),
97819- BPF_ALU32_REG(BPF_ADD, R9, R8),
97820- BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
97821- BPF_ALU32_REG(BPF_MOV, R0, R9),
97822- BPF_EXIT_INSN(),
97823- },
97824- INTERNAL,
97825- { },
97826- { { 0, 2957380 } }
97827- },
97828- { /* Mainly checking JIT here. */
97829- "INT: SUB",
97830- .u.insns_int = {
97831- BPF_ALU64_IMM(BPF_MOV, R0, 0),
97832- BPF_ALU64_IMM(BPF_MOV, R1, 1),
97833- BPF_ALU64_IMM(BPF_MOV, R2, 2),
97834- BPF_ALU64_IMM(BPF_MOV, R3, 3),
97835- BPF_ALU64_IMM(BPF_MOV, R4, 4),
97836- BPF_ALU64_IMM(BPF_MOV, R5, 5),
97837- BPF_ALU64_IMM(BPF_MOV, R6, 6),
97838- BPF_ALU64_IMM(BPF_MOV, R7, 7),
97839- BPF_ALU64_IMM(BPF_MOV, R8, 8),
97840- BPF_ALU64_IMM(BPF_MOV, R9, 9),
97841- BPF_ALU64_REG(BPF_SUB, R0, R0),
97842- BPF_ALU64_REG(BPF_SUB, R0, R1),
97843- BPF_ALU64_REG(BPF_SUB, R0, R2),
97844- BPF_ALU64_REG(BPF_SUB, R0, R3),
97845- BPF_ALU64_REG(BPF_SUB, R0, R4),
97846- BPF_ALU64_REG(BPF_SUB, R0, R5),
97847- BPF_ALU64_REG(BPF_SUB, R0, R6),
97848- BPF_ALU64_REG(BPF_SUB, R0, R7),
97849- BPF_ALU64_REG(BPF_SUB, R0, R8),
97850- BPF_ALU64_REG(BPF_SUB, R0, R9),
97851- BPF_ALU64_IMM(BPF_SUB, R0, 10),
97852- BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
97853- BPF_EXIT_INSN(),
97854- BPF_ALU64_REG(BPF_SUB, R1, R0),
97855- BPF_ALU64_REG(BPF_SUB, R1, R2),
97856- BPF_ALU64_REG(BPF_SUB, R1, R3),
97857- BPF_ALU64_REG(BPF_SUB, R1, R4),
97858- BPF_ALU64_REG(BPF_SUB, R1, R5),
97859- BPF_ALU64_REG(BPF_SUB, R1, R6),
97860- BPF_ALU64_REG(BPF_SUB, R1, R7),
97861- BPF_ALU64_REG(BPF_SUB, R1, R8),
97862- BPF_ALU64_REG(BPF_SUB, R1, R9),
97863- BPF_ALU64_IMM(BPF_SUB, R1, 10),
97864- BPF_ALU64_REG(BPF_SUB, R2, R0),
97865- BPF_ALU64_REG(BPF_SUB, R2, R1),
97866- BPF_ALU64_REG(BPF_SUB, R2, R3),
97867- BPF_ALU64_REG(BPF_SUB, R2, R4),
97868- BPF_ALU64_REG(BPF_SUB, R2, R5),
97869- BPF_ALU64_REG(BPF_SUB, R2, R6),
97870- BPF_ALU64_REG(BPF_SUB, R2, R7),
97871- BPF_ALU64_REG(BPF_SUB, R2, R8),
97872- BPF_ALU64_REG(BPF_SUB, R2, R9),
97873- BPF_ALU64_IMM(BPF_SUB, R2, 10),
97874- BPF_ALU64_REG(BPF_SUB, R3, R0),
97875- BPF_ALU64_REG(BPF_SUB, R3, R1),
97876- BPF_ALU64_REG(BPF_SUB, R3, R2),
97877- BPF_ALU64_REG(BPF_SUB, R3, R4),
97878- BPF_ALU64_REG(BPF_SUB, R3, R5),
97879- BPF_ALU64_REG(BPF_SUB, R3, R6),
97880- BPF_ALU64_REG(BPF_SUB, R3, R7),
97881- BPF_ALU64_REG(BPF_SUB, R3, R8),
97882- BPF_ALU64_REG(BPF_SUB, R3, R9),
97883- BPF_ALU64_IMM(BPF_SUB, R3, 10),
97884- BPF_ALU64_REG(BPF_SUB, R4, R0),
97885- BPF_ALU64_REG(BPF_SUB, R4, R1),
97886- BPF_ALU64_REG(BPF_SUB, R4, R2),
97887- BPF_ALU64_REG(BPF_SUB, R4, R3),
97888- BPF_ALU64_REG(BPF_SUB, R4, R5),
97889- BPF_ALU64_REG(BPF_SUB, R4, R6),
97890- BPF_ALU64_REG(BPF_SUB, R4, R7),
97891- BPF_ALU64_REG(BPF_SUB, R4, R8),
97892- BPF_ALU64_REG(BPF_SUB, R4, R9),
97893- BPF_ALU64_IMM(BPF_SUB, R4, 10),
97894- BPF_ALU64_REG(BPF_SUB, R5, R0),
97895- BPF_ALU64_REG(BPF_SUB, R5, R1),
97896- BPF_ALU64_REG(BPF_SUB, R5, R2),
97897- BPF_ALU64_REG(BPF_SUB, R5, R3),
97898- BPF_ALU64_REG(BPF_SUB, R5, R4),
97899- BPF_ALU64_REG(BPF_SUB, R5, R6),
97900- BPF_ALU64_REG(BPF_SUB, R5, R7),
97901- BPF_ALU64_REG(BPF_SUB, R5, R8),
97902- BPF_ALU64_REG(BPF_SUB, R5, R9),
97903- BPF_ALU64_IMM(BPF_SUB, R5, 10),
97904- BPF_ALU64_REG(BPF_SUB, R6, R0),
97905- BPF_ALU64_REG(BPF_SUB, R6, R1),
97906- BPF_ALU64_REG(BPF_SUB, R6, R2),
97907- BPF_ALU64_REG(BPF_SUB, R6, R3),
97908- BPF_ALU64_REG(BPF_SUB, R6, R4),
97909- BPF_ALU64_REG(BPF_SUB, R6, R5),
97910- BPF_ALU64_REG(BPF_SUB, R6, R7),
97911- BPF_ALU64_REG(BPF_SUB, R6, R8),
97912- BPF_ALU64_REG(BPF_SUB, R6, R9),
97913- BPF_ALU64_IMM(BPF_SUB, R6, 10),
97914- BPF_ALU64_REG(BPF_SUB, R7, R0),
97915- BPF_ALU64_REG(BPF_SUB, R7, R1),
97916- BPF_ALU64_REG(BPF_SUB, R7, R2),
97917- BPF_ALU64_REG(BPF_SUB, R7, R3),
97918- BPF_ALU64_REG(BPF_SUB, R7, R4),
97919- BPF_ALU64_REG(BPF_SUB, R7, R5),
97920- BPF_ALU64_REG(BPF_SUB, R7, R6),
97921- BPF_ALU64_REG(BPF_SUB, R7, R8),
97922- BPF_ALU64_REG(BPF_SUB, R7, R9),
97923- BPF_ALU64_IMM(BPF_SUB, R7, 10),
97924- BPF_ALU64_REG(BPF_SUB, R8, R0),
97925- BPF_ALU64_REG(BPF_SUB, R8, R1),
97926- BPF_ALU64_REG(BPF_SUB, R8, R2),
97927- BPF_ALU64_REG(BPF_SUB, R8, R3),
97928- BPF_ALU64_REG(BPF_SUB, R8, R4),
97929- BPF_ALU64_REG(BPF_SUB, R8, R5),
97930- BPF_ALU64_REG(BPF_SUB, R8, R6),
97931- BPF_ALU64_REG(BPF_SUB, R8, R7),
97932- BPF_ALU64_REG(BPF_SUB, R8, R9),
97933- BPF_ALU64_IMM(BPF_SUB, R8, 10),
97934- BPF_ALU64_REG(BPF_SUB, R9, R0),
97935- BPF_ALU64_REG(BPF_SUB, R9, R1),
97936- BPF_ALU64_REG(BPF_SUB, R9, R2),
97937- BPF_ALU64_REG(BPF_SUB, R9, R3),
97938- BPF_ALU64_REG(BPF_SUB, R9, R4),
97939- BPF_ALU64_REG(BPF_SUB, R9, R5),
97940- BPF_ALU64_REG(BPF_SUB, R9, R6),
97941- BPF_ALU64_REG(BPF_SUB, R9, R7),
97942- BPF_ALU64_REG(BPF_SUB, R9, R8),
97943- BPF_ALU64_IMM(BPF_SUB, R9, 10),
97944- BPF_ALU64_IMM(BPF_SUB, R0, 10),
97945- BPF_ALU64_IMM(BPF_NEG, R0, 0),
97946- BPF_ALU64_REG(BPF_SUB, R0, R1),
97947- BPF_ALU64_REG(BPF_SUB, R0, R2),
97948- BPF_ALU64_REG(BPF_SUB, R0, R3),
97949- BPF_ALU64_REG(BPF_SUB, R0, R4),
97950- BPF_ALU64_REG(BPF_SUB, R0, R5),
97951- BPF_ALU64_REG(BPF_SUB, R0, R6),
97952- BPF_ALU64_REG(BPF_SUB, R0, R7),
97953- BPF_ALU64_REG(BPF_SUB, R0, R8),
97954- BPF_ALU64_REG(BPF_SUB, R0, R9),
97955- BPF_EXIT_INSN(),
97956- },
97957- INTERNAL,
97958- { },
97959- { { 0, 11 } }
97960- },
97961- { /* Mainly checking JIT here. */
97962- "INT: XOR",
97963- .u.insns_int = {
97964- BPF_ALU64_REG(BPF_SUB, R0, R0),
97965- BPF_ALU64_REG(BPF_XOR, R1, R1),
97966- BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
97967- BPF_EXIT_INSN(),
97968- BPF_ALU64_IMM(BPF_MOV, R0, 10),
97969- BPF_ALU64_IMM(BPF_MOV, R1, -1),
97970- BPF_ALU64_REG(BPF_SUB, R1, R1),
97971- BPF_ALU64_REG(BPF_XOR, R2, R2),
97972- BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
97973- BPF_EXIT_INSN(),
97974- BPF_ALU64_REG(BPF_SUB, R2, R2),
97975- BPF_ALU64_REG(BPF_XOR, R3, R3),
97976- BPF_ALU64_IMM(BPF_MOV, R0, 10),
97977- BPF_ALU64_IMM(BPF_MOV, R1, -1),
97978- BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
97979- BPF_EXIT_INSN(),
97980- BPF_ALU64_REG(BPF_SUB, R3, R3),
97981- BPF_ALU64_REG(BPF_XOR, R4, R4),
97982- BPF_ALU64_IMM(BPF_MOV, R2, 1),
97983- BPF_ALU64_IMM(BPF_MOV, R5, -1),
97984- BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
97985- BPF_EXIT_INSN(),
97986- BPF_ALU64_REG(BPF_SUB, R4, R4),
97987- BPF_ALU64_REG(BPF_XOR, R5, R5),
97988- BPF_ALU64_IMM(BPF_MOV, R3, 1),
97989- BPF_ALU64_IMM(BPF_MOV, R7, -1),
97990- BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
97991- BPF_EXIT_INSN(),
97992- BPF_ALU64_IMM(BPF_MOV, R5, 1),
97993- BPF_ALU64_REG(BPF_SUB, R5, R5),
97994- BPF_ALU64_REG(BPF_XOR, R6, R6),
97995- BPF_ALU64_IMM(BPF_MOV, R1, 1),
97996- BPF_ALU64_IMM(BPF_MOV, R8, -1),
97997- BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
97998- BPF_EXIT_INSN(),
97999- BPF_ALU64_REG(BPF_SUB, R6, R6),
98000- BPF_ALU64_REG(BPF_XOR, R7, R7),
98001- BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
98002- BPF_EXIT_INSN(),
98003- BPF_ALU64_REG(BPF_SUB, R7, R7),
98004- BPF_ALU64_REG(BPF_XOR, R8, R8),
98005- BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
98006- BPF_EXIT_INSN(),
98007- BPF_ALU64_REG(BPF_SUB, R8, R8),
98008- BPF_ALU64_REG(BPF_XOR, R9, R9),
98009- BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
98010- BPF_EXIT_INSN(),
98011- BPF_ALU64_REG(BPF_SUB, R9, R9),
98012- BPF_ALU64_REG(BPF_XOR, R0, R0),
98013- BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
98014- BPF_EXIT_INSN(),
98015- BPF_ALU64_REG(BPF_SUB, R1, R1),
98016- BPF_ALU64_REG(BPF_XOR, R0, R0),
98017- BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
98018- BPF_ALU64_IMM(BPF_MOV, R0, 0),
98019- BPF_EXIT_INSN(),
98020- BPF_ALU64_IMM(BPF_MOV, R0, 1),
98021- BPF_EXIT_INSN(),
98022- },
98023- INTERNAL,
98024- { },
98025- { { 0, 1 } }
98026- },
98027- { /* Mainly checking JIT here. */
98028- "INT: MUL",
98029- .u.insns_int = {
98030- BPF_ALU64_IMM(BPF_MOV, R0, 11),
98031- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98032- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98033- BPF_ALU64_IMM(BPF_MOV, R3, 3),
98034- BPF_ALU64_IMM(BPF_MOV, R4, 4),
98035- BPF_ALU64_IMM(BPF_MOV, R5, 5),
98036- BPF_ALU64_IMM(BPF_MOV, R6, 6),
98037- BPF_ALU64_IMM(BPF_MOV, R7, 7),
98038- BPF_ALU64_IMM(BPF_MOV, R8, 8),
98039- BPF_ALU64_IMM(BPF_MOV, R9, 9),
98040- BPF_ALU64_REG(BPF_MUL, R0, R0),
98041- BPF_ALU64_REG(BPF_MUL, R0, R1),
98042- BPF_ALU64_REG(BPF_MUL, R0, R2),
98043- BPF_ALU64_REG(BPF_MUL, R0, R3),
98044- BPF_ALU64_REG(BPF_MUL, R0, R4),
98045- BPF_ALU64_REG(BPF_MUL, R0, R5),
98046- BPF_ALU64_REG(BPF_MUL, R0, R6),
98047- BPF_ALU64_REG(BPF_MUL, R0, R7),
98048- BPF_ALU64_REG(BPF_MUL, R0, R8),
98049- BPF_ALU64_REG(BPF_MUL, R0, R9),
98050- BPF_ALU64_IMM(BPF_MUL, R0, 10),
98051- BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
98052- BPF_EXIT_INSN(),
98053- BPF_ALU64_REG(BPF_MUL, R1, R0),
98054- BPF_ALU64_REG(BPF_MUL, R1, R2),
98055- BPF_ALU64_REG(BPF_MUL, R1, R3),
98056- BPF_ALU64_REG(BPF_MUL, R1, R4),
98057- BPF_ALU64_REG(BPF_MUL, R1, R5),
98058- BPF_ALU64_REG(BPF_MUL, R1, R6),
98059- BPF_ALU64_REG(BPF_MUL, R1, R7),
98060- BPF_ALU64_REG(BPF_MUL, R1, R8),
98061- BPF_ALU64_REG(BPF_MUL, R1, R9),
98062- BPF_ALU64_IMM(BPF_MUL, R1, 10),
98063- BPF_ALU64_REG(BPF_MOV, R2, R1),
98064- BPF_ALU64_IMM(BPF_RSH, R2, 32),
98065- BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
98066- BPF_EXIT_INSN(),
98067- BPF_ALU64_IMM(BPF_LSH, R1, 32),
98068- BPF_ALU64_IMM(BPF_ARSH, R1, 32),
98069- BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
98070- BPF_EXIT_INSN(),
98071- BPF_ALU64_REG(BPF_MUL, R2, R0),
98072- BPF_ALU64_REG(BPF_MUL, R2, R1),
98073- BPF_ALU64_REG(BPF_MUL, R2, R3),
98074- BPF_ALU64_REG(BPF_MUL, R2, R4),
98075- BPF_ALU64_REG(BPF_MUL, R2, R5),
98076- BPF_ALU64_REG(BPF_MUL, R2, R6),
98077- BPF_ALU64_REG(BPF_MUL, R2, R7),
98078- BPF_ALU64_REG(BPF_MUL, R2, R8),
98079- BPF_ALU64_REG(BPF_MUL, R2, R9),
98080- BPF_ALU64_IMM(BPF_MUL, R2, 10),
98081- BPF_ALU64_IMM(BPF_RSH, R2, 32),
98082- BPF_ALU64_REG(BPF_MOV, R0, R2),
98083- BPF_EXIT_INSN(),
98084- },
98085- INTERNAL,
98086- { },
98087- { { 0, 0x35d97ef2 } }
98088- },
98089- {
98090- "INT: ALU MIX",
98091- .u.insns_int = {
98092- BPF_ALU64_IMM(BPF_MOV, R0, 11),
98093- BPF_ALU64_IMM(BPF_ADD, R0, -1),
98094- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98095- BPF_ALU64_IMM(BPF_XOR, R2, 3),
98096- BPF_ALU64_REG(BPF_DIV, R0, R2),
98097- BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
98098- BPF_EXIT_INSN(),
98099- BPF_ALU64_IMM(BPF_MOD, R0, 3),
98100- BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
98101- BPF_EXIT_INSN(),
98102- BPF_ALU64_IMM(BPF_MOV, R0, -1),
98103- BPF_EXIT_INSN(),
98104- },
98105- INTERNAL,
98106- { },
98107- { { 0, -1 } }
98108- },
98109- {
98110- "INT: DIV + ABS",
98111- .u.insns_int = {
98112- BPF_ALU64_REG(BPF_MOV, R6, R1),
98113- BPF_LD_ABS(BPF_B, 3),
98114- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98115- BPF_ALU32_REG(BPF_DIV, R0, R2),
98116- BPF_ALU64_REG(BPF_MOV, R8, R0),
98117- BPF_LD_ABS(BPF_B, 4),
98118- BPF_ALU64_REG(BPF_ADD, R8, R0),
98119- BPF_LD_IND(BPF_B, R8, -70),
98120- BPF_EXIT_INSN(),
98121- },
98122- INTERNAL,
98123- { 10, 20, 30, 40, 50 },
98124- { { 4, 0 }, { 5, 10 } }
98125- },
98126- {
98127- "INT: DIV by zero",
98128- .u.insns_int = {
98129- BPF_ALU64_REG(BPF_MOV, R6, R1),
98130- BPF_ALU64_IMM(BPF_MOV, R7, 0),
98131- BPF_LD_ABS(BPF_B, 3),
98132- BPF_ALU32_REG(BPF_DIV, R0, R7),
98133- BPF_EXIT_INSN(),
98134- },
98135- INTERNAL,
98136- { 10, 20, 30, 40, 50 },
98137- { { 3, 0 }, { 4, 0 } }
98138- },
98139- {
98140- "check: missing ret",
98141- .u.insns = {
98142- BPF_STMT(BPF_LD | BPF_IMM, 1),
98143- },
98144- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98145- { },
98146- { }
98147- },
98148- {
98149- "check: div_k_0",
98150- .u.insns = {
98151- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
98152- BPF_STMT(BPF_RET | BPF_K, 0)
98153- },
98154- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98155- { },
98156- { }
98157- },
98158- {
98159- "check: unknown insn",
98160- .u.insns = {
98161- /* seccomp insn, rejected in socket filter */
98162- BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
98163- BPF_STMT(BPF_RET | BPF_K, 0)
98164- },
98165- CLASSIC | FLAG_EXPECTED_FAIL,
98166- { },
98167- { }
98168- },
98169- {
98170- "check: out of range spill/fill",
98171- .u.insns = {
98172- BPF_STMT(BPF_STX, 16),
98173- BPF_STMT(BPF_RET | BPF_K, 0)
98174- },
98175- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98176- { },
98177- { }
98178- },
98179- {
98180- "JUMPS + HOLES",
98181- .u.insns = {
98182- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98183- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
98184- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98185- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98186- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98187- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98188- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98189- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98190- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98191- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98192- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98193- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98194- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98195- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98196- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98197- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
98198- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98199- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
98200- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98201- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
98202- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
98203- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98204- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98205- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98206- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98207- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98208- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98209- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98210- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98211- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98212- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98213- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98214- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98215- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98216- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
98217- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
98218- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98219- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
98220- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
98221- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98222- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98223- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98224- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98225- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98226- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98227- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98228- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98229- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98230- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98231- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98232- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98233- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98234- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
98235- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
98236- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98237- BPF_STMT(BPF_RET | BPF_A, 0),
98238- BPF_STMT(BPF_RET | BPF_A, 0),
98239- },
98240- CLASSIC,
98241- { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
98242- 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
98243- 0x08, 0x00,
98244- 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
98245- 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
98246- 0xc0, 0xa8, 0x33, 0x01,
98247- 0xc0, 0xa8, 0x33, 0x02,
98248- 0xbb, 0xb6,
98249- 0xa9, 0xfa,
98250- 0x00, 0x14, 0x00, 0x00,
98251- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98252- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98253- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98254- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98255- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98256- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98257- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98258- 0xcc, 0xcc, 0xcc, 0xcc },
98259- { { 88, 0x001b } }
98260- },
98261- {
98262- "check: RET X",
98263- .u.insns = {
98264- BPF_STMT(BPF_RET | BPF_X, 0),
98265- },
98266- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98267- { },
98268- { },
98269- },
98270- {
98271- "check: LDX + RET X",
98272- .u.insns = {
98273- BPF_STMT(BPF_LDX | BPF_IMM, 42),
98274- BPF_STMT(BPF_RET | BPF_X, 0),
98275- },
98276- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98277- { },
98278- { },
98279- },
98280- { /* Mainly checking JIT here. */
98281- "M[]: alt STX + LDX",
98282- .u.insns = {
98283- BPF_STMT(BPF_LDX | BPF_IMM, 100),
98284- BPF_STMT(BPF_STX, 0),
98285- BPF_STMT(BPF_LDX | BPF_MEM, 0),
98286- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98287- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98288- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98289- BPF_STMT(BPF_STX, 1),
98290- BPF_STMT(BPF_LDX | BPF_MEM, 1),
98291- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98292- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98293- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98294- BPF_STMT(BPF_STX, 2),
98295- BPF_STMT(BPF_LDX | BPF_MEM, 2),
98296- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98297- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98298- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98299- BPF_STMT(BPF_STX, 3),
98300- BPF_STMT(BPF_LDX | BPF_MEM, 3),
98301- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98302- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98303- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98304- BPF_STMT(BPF_STX, 4),
98305- BPF_STMT(BPF_LDX | BPF_MEM, 4),
98306- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98307- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98308- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98309- BPF_STMT(BPF_STX, 5),
98310- BPF_STMT(BPF_LDX | BPF_MEM, 5),
98311- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98312- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98313- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98314- BPF_STMT(BPF_STX, 6),
98315- BPF_STMT(BPF_LDX | BPF_MEM, 6),
98316- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98317- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98318- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98319- BPF_STMT(BPF_STX, 7),
98320- BPF_STMT(BPF_LDX | BPF_MEM, 7),
98321- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98322- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98323- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98324- BPF_STMT(BPF_STX, 8),
98325- BPF_STMT(BPF_LDX | BPF_MEM, 8),
98326- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98327- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98328- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98329- BPF_STMT(BPF_STX, 9),
98330- BPF_STMT(BPF_LDX | BPF_MEM, 9),
98331- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98332- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98333- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98334- BPF_STMT(BPF_STX, 10),
98335- BPF_STMT(BPF_LDX | BPF_MEM, 10),
98336- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98337- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98338- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98339- BPF_STMT(BPF_STX, 11),
98340- BPF_STMT(BPF_LDX | BPF_MEM, 11),
98341- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98342- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98343- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98344- BPF_STMT(BPF_STX, 12),
98345- BPF_STMT(BPF_LDX | BPF_MEM, 12),
98346- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98347- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98348- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98349- BPF_STMT(BPF_STX, 13),
98350- BPF_STMT(BPF_LDX | BPF_MEM, 13),
98351- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98352- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98353- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98354- BPF_STMT(BPF_STX, 14),
98355- BPF_STMT(BPF_LDX | BPF_MEM, 14),
98356- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98357- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98358- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98359- BPF_STMT(BPF_STX, 15),
98360- BPF_STMT(BPF_LDX | BPF_MEM, 15),
98361- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98362- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98363- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98364- BPF_STMT(BPF_RET | BPF_A, 0),
98365- },
98366- CLASSIC | FLAG_NO_DATA,
98367- { },
98368- { { 0, 116 } },
98369- },
98370- { /* Mainly checking JIT here. */
98371- "M[]: full STX + full LDX",
98372- .u.insns = {
98373- BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
98374- BPF_STMT(BPF_STX, 0),
98375- BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
98376- BPF_STMT(BPF_STX, 1),
98377- BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
98378- BPF_STMT(BPF_STX, 2),
98379- BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
98380- BPF_STMT(BPF_STX, 3),
98381- BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
98382- BPF_STMT(BPF_STX, 4),
98383- BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
98384- BPF_STMT(BPF_STX, 5),
98385- BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
98386- BPF_STMT(BPF_STX, 6),
98387- BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
98388- BPF_STMT(BPF_STX, 7),
98389- BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
98390- BPF_STMT(BPF_STX, 8),
98391- BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
98392- BPF_STMT(BPF_STX, 9),
98393- BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
98394- BPF_STMT(BPF_STX, 10),
98395- BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
98396- BPF_STMT(BPF_STX, 11),
98397- BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
98398- BPF_STMT(BPF_STX, 12),
98399- BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
98400- BPF_STMT(BPF_STX, 13),
98401- BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
98402- BPF_STMT(BPF_STX, 14),
98403- BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
98404- BPF_STMT(BPF_STX, 15),
98405- BPF_STMT(BPF_LDX | BPF_MEM, 0),
98406- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98407- BPF_STMT(BPF_LDX | BPF_MEM, 1),
98408- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98409- BPF_STMT(BPF_LDX | BPF_MEM, 2),
98410- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98411- BPF_STMT(BPF_LDX | BPF_MEM, 3),
98412- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98413- BPF_STMT(BPF_LDX | BPF_MEM, 4),
98414- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98415- BPF_STMT(BPF_LDX | BPF_MEM, 5),
98416- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98417- BPF_STMT(BPF_LDX | BPF_MEM, 6),
98418- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98419- BPF_STMT(BPF_LDX | BPF_MEM, 7),
98420- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98421- BPF_STMT(BPF_LDX | BPF_MEM, 8),
98422- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98423- BPF_STMT(BPF_LDX | BPF_MEM, 9),
98424- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98425- BPF_STMT(BPF_LDX | BPF_MEM, 10),
98426- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98427- BPF_STMT(BPF_LDX | BPF_MEM, 11),
98428- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98429- BPF_STMT(BPF_LDX | BPF_MEM, 12),
98430- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98431- BPF_STMT(BPF_LDX | BPF_MEM, 13),
98432- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98433- BPF_STMT(BPF_LDX | BPF_MEM, 14),
98434- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98435- BPF_STMT(BPF_LDX | BPF_MEM, 15),
98436- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98437- BPF_STMT(BPF_RET | BPF_A, 0),
98438- },
98439- CLASSIC | FLAG_NO_DATA,
98440- { },
98441- { { 0, 0x2a5a5e5 } },
98442- },
98443- {
98444- "check: SKF_AD_MAX",
98445- .u.insns = {
98446- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98447- SKF_AD_OFF + SKF_AD_MAX),
98448- BPF_STMT(BPF_RET | BPF_A, 0),
98449- },
98450- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98451- { },
98452- { },
98453- },
98454- { /* Passes checker but fails during runtime. */
98455- "LD [SKF_AD_OFF-1]",
98456- .u.insns = {
98457- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98458- SKF_AD_OFF - 1),
98459- BPF_STMT(BPF_RET | BPF_K, 1),
98460- },
98461- CLASSIC,
98462- { },
98463- { { 1, 0 } },
98464- },
98465-};
98466-
98467-static struct net_device dev;
98468-
98469-static struct sk_buff *populate_skb(char *buf, int size)
98470-{
98471- struct sk_buff *skb;
98472-
98473- if (size >= MAX_DATA)
98474- return NULL;
98475-
98476- skb = alloc_skb(MAX_DATA, GFP_KERNEL);
98477- if (!skb)
98478- return NULL;
98479-
98480- memcpy(__skb_put(skb, size), buf, size);
98481-
98482- /* Initialize a fake skb with test pattern. */
98483- skb_reset_mac_header(skb);
98484- skb->protocol = htons(ETH_P_IP);
98485- skb->pkt_type = SKB_TYPE;
98486- skb->mark = SKB_MARK;
98487- skb->hash = SKB_HASH;
98488- skb->queue_mapping = SKB_QUEUE_MAP;
98489- skb->vlan_tci = SKB_VLAN_TCI;
98490- skb->dev = &dev;
98491- skb->dev->ifindex = SKB_DEV_IFINDEX;
98492- skb->dev->type = SKB_DEV_TYPE;
98493- skb_set_network_header(skb, min(size, ETH_HLEN));
98494-
98495- return skb;
98496-}
98497-
98498-static void *generate_test_data(struct bpf_test *test, int sub)
98499-{
98500- if (test->aux & FLAG_NO_DATA)
98501- return NULL;
98502-
98503- /* Test case expects an skb, so populate one. Various
98504- * subtests generate skbs of different sizes based on
98505- * the same data.
98506- */
98507- return populate_skb(test->data, test->test[sub].data_size);
98508-}
98509-
98510-static void release_test_data(const struct bpf_test *test, void *data)
98511-{
98512- if (test->aux & FLAG_NO_DATA)
98513- return;
98514-
98515- kfree_skb(data);
98516-}
98517-
98518-static int probe_filter_length(struct sock_filter *fp)
98519-{
98520- int len = 0;
98521-
98522- for (len = MAX_INSNS - 1; len > 0; --len)
98523- if (fp[len].code != 0 || fp[len].k != 0)
98524- break;
98525-
98526- return len + 1;
98527-}
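probe_filter_length() above recovers the effective program length by scanning backward from MAX_INSNS for the last instruction whose code or k field is non-zero, so the test arrays can be declared at a fixed size and only partially filled. A minimal standalone sketch of the same idea (types and names here are illustrative, not from the patch):

	struct insn {
		unsigned short code;
		unsigned int k;
	};

	/* Trailing all-zero slots do not count toward the program length. */
	static int effective_len(const struct insn *prog, int max)
	{
		int len;

		for (len = max - 1; len > 0; --len)
			if (prog[len].code != 0 || prog[len].k != 0)
				break;

		return len + 1; /* one past the index of the last real insn */
	}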
98528-
98529-static struct sk_filter *generate_filter(int which, int *err)
98530-{
98531- struct sk_filter *fp;
98532- struct sock_fprog_kern fprog;
98533- unsigned int flen = probe_filter_length(tests[which].u.insns);
98534- __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
98535-
98536- switch (test_type) {
98537- case CLASSIC:
98538- fprog.filter = tests[which].u.insns;
98539- fprog.len = flen;
98540-
98541- *err = sk_unattached_filter_create(&fp, &fprog);
98542- if (tests[which].aux & FLAG_EXPECTED_FAIL) {
98543- if (*err == -EINVAL) {
98544- pr_cont("PASS\n");
98545- /* Verifier rejected filter as expected. */
98546- *err = 0;
98547- return NULL;
98548- } else {
98549- pr_cont("UNEXPECTED_PASS\n");
98550-			/* Verifier didn't reject the test; that's
98551-			 * bad enough, just return!
98552-			 */
98553- *err = -EINVAL;
98554- return NULL;
98555- }
98556- }
98557- /* We don't expect to fail. */
98558- if (*err) {
98559- pr_cont("FAIL to attach err=%d len=%d\n",
98560- *err, fprog.len);
98561- return NULL;
98562- }
98563- break;
98564-
98565- case INTERNAL:
98566- fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
98567- if (fp == NULL) {
98568- pr_cont("UNEXPECTED_FAIL no memory left\n");
98569- *err = -ENOMEM;
98570- return NULL;
98571- }
98572-
98573- fp->len = flen;
98574- memcpy(fp->insnsi, tests[which].u.insns_int,
98575- fp->len * sizeof(struct sock_filter_int));
98576-
98577- sk_filter_select_runtime(fp);
98578- break;
98579- }
98580-
98581- *err = 0;
98582- return fp;
98583-}
98584-
98585-static void release_filter(struct sk_filter *fp, int which)
98586-{
98587- __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
98588-
98589- switch (test_type) {
98590- case CLASSIC:
98591- sk_unattached_filter_destroy(fp);
98592- break;
98593- case INTERNAL:
98594- sk_filter_free(fp);
98595- break;
98596- }
98597-}
98598-
98599-static int __run_one(const struct sk_filter *fp, const void *data,
98600- int runs, u64 *duration)
98601-{
98602- u64 start, finish;
98603- int ret, i;
98604-
98605- start = ktime_to_us(ktime_get());
98606-
98607- for (i = 0; i < runs; i++)
98608- ret = SK_RUN_FILTER(fp, data);
98609-
98610- finish = ktime_to_us(ktime_get());
98611-
98612- *duration = (finish - start) * 1000ULL;
98613- do_div(*duration, runs);
98614-
98615- return ret;
98616-}
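__run_one() reports the mean per-run latency in nanoseconds: it takes the elapsed time of `runs` back-to-back executions in microseconds, scales it by 1000, and lets do_div() divide by the run count. As a worked example with assumed numbers, 10000 runs spanning 2500 us give (2500 * 1000) / 10000 = 250 ns per run.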
98617-
98618-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
98619-{
98620- int err_cnt = 0, i, runs = MAX_TESTRUNS;
98621-
98622- for (i = 0; i < MAX_SUBTESTS; i++) {
98623- void *data;
98624- u64 duration;
98625- u32 ret;
98626-
98627- if (test->test[i].data_size == 0 &&
98628- test->test[i].result == 0)
98629- break;
98630-
98631- data = generate_test_data(test, i);
98632- ret = __run_one(fp, data, runs, &duration);
98633- release_test_data(test, data);
98634-
98635- if (ret == test->test[i].result) {
98636- pr_cont("%lld ", duration);
98637- } else {
98638- pr_cont("ret %d != %d ", ret,
98639- test->test[i].result);
98640- err_cnt++;
98641- }
98642- }
98643-
98644- return err_cnt;
98645-}
98646-
98647-static __init int test_bpf(void)
98648-{
98649- int i, err_cnt = 0, pass_cnt = 0;
98650-
98651- for (i = 0; i < ARRAY_SIZE(tests); i++) {
98652- struct sk_filter *fp;
98653- int err;
98654-
98655- pr_info("#%d %s ", i, tests[i].descr);
98656-
98657- fp = generate_filter(i, &err);
98658- if (fp == NULL) {
98659- if (err == 0) {
98660- pass_cnt++;
98661- continue;
98662- }
98663-
98664- return err;
98665- }
98666- err = run_one(fp, &tests[i]);
98667- release_filter(fp, i);
98668-
98669- if (err) {
98670- pr_cont("FAIL (%d times)\n", err);
98671- err_cnt++;
98672- } else {
98673- pr_cont("PASS\n");
98674- pass_cnt++;
98675- }
98676- }
98677-
98678- pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
98679- return err_cnt ? -EINVAL : 0;
98680-}
98681-
98682-static int __init test_bpf_init(void)
98683-{
98684- return test_bpf();
98685-}
98686-
98687-static void __exit test_bpf_exit(void)
98688-{
98689-}
98690-
98691-module_init(test_bpf_init);
98692-module_exit(test_bpf_exit);
98693-
98694-MODULE_LICENSE("GPL");
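Because test_bpf_init() just calls test_bpf(), loading the module runs the whole suite once: each case prints its per-subtest timings or a ret mismatch, followed by the final "Summary: %d PASSED, %d FAILED" line in the kernel log. test_bpf_exit() is deliberately empty since the module keeps no state, so it can be unloaded again right after reading the results from dmesg.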
98695diff --git a/lib/usercopy.c b/lib/usercopy.c
98696index 4f5b1dd..7cab418 100644
98697--- a/lib/usercopy.c
98698+++ b/lib/usercopy.c
98699@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
98700 WARN(1, "Buffer overflow detected!\n");
98701 }
98702 EXPORT_SYMBOL(copy_from_user_overflow);
98703+
98704+void copy_to_user_overflow(void)
98705+{
98706+ WARN(1, "Buffer overflow detected!\n");
98707+}
98708+EXPORT_SYMBOL(copy_to_user_overflow);
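copy_to_user_overflow() completes the pair with copy_from_user_overflow() above: both are out-of-line landing pads for the compile-time object-size checks performed by the hardened uaccess wrappers. A hedged sketch of the calling pattern, assuming a __builtin_object_size()-based check; the real wrappers live in the per-architecture uaccess headers, and checked_copy_to_user is a hypothetical name:

	static inline unsigned long
	checked_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		size_t sz = __builtin_object_size(from, 0); /* (size_t)-1 if unknown */

		if (sz != (size_t)-1 && sz < n) {
			copy_to_user_overflow(); /* emits the WARN above */
			return n; /* report that nothing was copied */
		}
		return copy_to_user(to, from, n);
	}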
98709diff --git a/lib/vsprintf.c b/lib/vsprintf.c
98710index 6fe2c84..2fe5ec6 100644
98711--- a/lib/vsprintf.c
98712+++ b/lib/vsprintf.c
98713@@ -16,6 +16,9 @@
98714 * - scnprintf and vscnprintf
98715 */
98716
98717+#ifdef CONFIG_GRKERNSEC_HIDESYM
98718+#define __INCLUDED_BY_HIDESYM 1
98719+#endif
98720 #include <stdarg.h>
98721 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
98722 #include <linux/types.h>
98723@@ -624,7 +627,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
98724 #ifdef CONFIG_KALLSYMS
98725 if (*fmt == 'B')
98726 sprint_backtrace(sym, value);
98727- else if (*fmt != 'f' && *fmt != 's')
98728+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
98729 sprint_symbol(sym, value);
98730 else
98731 sprint_symbol_no_offset(sym, value);
98732@@ -1183,7 +1186,11 @@ char *address_val(char *buf, char *end, const void *addr,
98733 return number(buf, end, num, spec);
98734 }
98735
98736+#ifdef CONFIG_GRKERNSEC_HIDESYM
98737+int kptr_restrict __read_mostly = 2;
98738+#else
98739 int kptr_restrict __read_mostly;
98740+#endif
98741
98742 /*
98743 * Show a '%p' thing. A kernel extension is that the '%p' is followed
98744@@ -1194,8 +1201,10 @@ int kptr_restrict __read_mostly;
98745 *
98746 * - 'F' For symbolic function descriptor pointers with offset
98747 * - 'f' For simple symbolic function names without offset
98748+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
98749 * - 'S' For symbolic direct pointers with offset
98750 * - 's' For symbolic direct pointers without offset
98751+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
98752 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
98753 * - 'B' For backtraced symbolic direct pointers with offset
98754 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
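The 'X' and 'A' specifiers added here are the GRKERNSEC_HIDESYM opt-ins: as the switch below shows, plain 'S'/'s' fall through to the censored pointer path when HIDESYM is enabled, while 'X' (after dereferencing the function descriptor) and 'A' still resolve through symbol_string(). The kmemleak change further down uses "%pA" exactly this way; a hypothetical call site for 'X' would look like:

	/* trusted to reveal the symbol name even under HIDESYM */
	printk(KERN_INFO "handler: %pX\n", ops->handler);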
98755@@ -1263,12 +1272,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
98756
98757 if (!ptr && *fmt != 'K') {
98758 /*
98759- * Print (null) with the same width as a pointer so it makes
98760+ * Print (nil) with the same width as a pointer so it makes
98761 * tabular output look nice.
98762 */
98763 if (spec.field_width == -1)
98764 spec.field_width = default_width;
98765- return string(buf, end, "(null)", spec);
98766+ return string(buf, end, "(nil)", spec);
98767 }
98768
98769 switch (*fmt) {
98770@@ -1278,6 +1287,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
98771 /* Fallthrough */
98772 case 'S':
98773 case 's':
98774+#ifdef CONFIG_GRKERNSEC_HIDESYM
98775+ break;
98776+#else
98777+ return symbol_string(buf, end, ptr, spec, fmt);
98778+#endif
98779+ case 'X':
98780+ ptr = dereference_function_descriptor(ptr);
98781+ case 'A':
98782 case 'B':
98783 return symbol_string(buf, end, ptr, spec, fmt);
98784 case 'R':
98785@@ -1333,6 +1350,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
98786 va_end(va);
98787 return buf;
98788 }
98789+ case 'P':
98790+ break;
98791 case 'K':
98792 /*
98793 * %pK cannot be used in IRQ context because its test
98794@@ -1390,6 +1409,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
98795 ((const struct file *)ptr)->f_path.dentry,
98796 spec, fmt);
98797 }
98798+
98799+#ifdef CONFIG_GRKERNSEC_HIDESYM
98800+	/* 'P' = approved pointers to copy to userland,
98801+	   as in the /proc/kallsyms case: it displays nothing
98802+	   for non-root users and the real contents for root users.
98803+	   'X' = approved simple symbols.
98804+	   Also ignore 'K' pointers, since their NULLing for non-root
98805+	   users is already forced above.
98806+	*/
98807+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
98808+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
98809+ dump_stack();
98810+ ptr = NULL;
98811+ }
98812+#endif
98813+
98814 spec.flags |= SMALL;
98815 if (spec.field_width == -1) {
98816 spec.field_width = default_width;
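The HIDESYM block above is a last-ditch infoleak filter: when the destination buffer sits in a slab object flagged for usercopy (is_usercopy_object()), the pointer value looks like a kernel address (above TASK_SIZE), and the specifier is neither an approved 'P'/'X' form nor an already-censored 'K', the event is logged and the pointer is replaced with NULL before formatting. A hypothetical offender it would catch, assuming buf is later copied to userland:

	snprintf(buf, len, "obj at %p\n", kobj); /* alert + prints the nil form */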
98817@@ -2089,11 +2124,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
98818 typeof(type) value; \
98819 if (sizeof(type) == 8) { \
98820 args = PTR_ALIGN(args, sizeof(u32)); \
98821- *(u32 *)&value = *(u32 *)args; \
98822- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
98823+ *(u32 *)&value = *(const u32 *)args; \
98824+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
98825 } else { \
98826 args = PTR_ALIGN(args, sizeof(type)); \
98827- value = *(typeof(type) *)args; \
98828+ value = *(const typeof(type) *)args; \
98829 } \
98830 args += sizeof(type); \
98831 value; \
98832@@ -2156,7 +2191,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
98833 case FORMAT_TYPE_STR: {
98834 const char *str_arg = args;
98835 args += strlen(str_arg) + 1;
98836- str = string(str, end, (char *)str_arg, spec);
98837+ str = string(str, end, str_arg, spec);
98838 break;
98839 }
98840
98841diff --git a/localversion-grsec b/localversion-grsec
98842new file mode 100644
98843index 0000000..7cd6065
98844--- /dev/null
98845+++ b/localversion-grsec
98846@@ -0,0 +1 @@
98847+-grsec
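Kbuild appends the contents of every localversion* file at the top of the tree to the kernel release string, so this one-line file is all it takes to tag the build: on an otherwise unmodified tree, `make kernelrelease` and `uname -r` on the resulting kernel report 3.16.1-grsec instead of 3.16.1.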
98848diff --git a/mm/Kconfig b/mm/Kconfig
98849index 3e9977a..205074f 100644
98850--- a/mm/Kconfig
98851+++ b/mm/Kconfig
98852@@ -333,10 +333,11 @@ config KSM
98853 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
98854
98855 config DEFAULT_MMAP_MIN_ADDR
98856- int "Low address space to protect from user allocation"
98857+ int "Low address space to protect from user allocation"
98858 depends on MMU
98859- default 4096
98860- help
98861+ default 32768 if ALPHA || ARM || PARISC || SPARC32
98862+ default 65536
98863+ help
98864 This is the portion of low virtual memory which should be protected
98865 from userspace allocation. Keeping a user from writing to low pages
98866 can help reduce the impact of kernel NULL pointer bugs.
98867@@ -367,7 +368,7 @@ config MEMORY_FAILURE
98868
98869 config HWPOISON_INJECT
98870 tristate "HWPoison pages injector"
98871- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
98872+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
98873 select PROC_PAGE_MONITOR
98874
98875 config NOMMU_INITIAL_TRIM_EXCESS
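Raising DEFAULT_MMAP_MIN_ADDR only changes the boot-time default; the effective value stays visible and tunable through /proc/sys/vm/mmap_min_addr. A small userspace demo of the effect (hedged: the exact errno comes from the LSM hook, EPERM on a stock capability setup):

	#include <sys/mman.h>
	#include <errno.h>
	#include <stdio.h>

	int main(void)
	{
		/* 0x1000 is well below the new 65536 default */
		void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

		if (p == MAP_FAILED)
			printf("mmap at 0x1000 failed: errno=%d\n", errno);
		return 0;
	}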
98876diff --git a/mm/backing-dev.c b/mm/backing-dev.c
98877index 1706cbb..f89dbca 100644
98878--- a/mm/backing-dev.c
98879+++ b/mm/backing-dev.c
98880@@ -12,7 +12,7 @@
98881 #include <linux/device.h>
98882 #include <trace/events/writeback.h>
98883
98884-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
98885+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
98886
98887 struct backing_dev_info default_backing_dev_info = {
98888 .name = "default",
98889@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
98890 return err;
98891
98892 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
98893- atomic_long_inc_return(&bdi_seq));
98894+ atomic_long_inc_return_unchecked(&bdi_seq));
98895 if (err) {
98896 bdi_destroy(bdi);
98897 return err;
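The atomic_long_unchecked_t conversion here is the PaX REFCOUNT opt-out pattern that recurs throughout the patch (see mm/memory-failure.c below): bdi_seq is a pure naming sequence whose wraparound is harmless, so it is exempted from reference-counter overflow detection; when PAX_REFCOUNT is not enabled, the _unchecked types and helpers are expected to alias the plain atomic ones.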
98898diff --git a/mm/filemap.c b/mm/filemap.c
98899index 900edfa..ff056b1 100644
98900--- a/mm/filemap.c
98901+++ b/mm/filemap.c
98902@@ -2074,7 +2074,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
98903 struct address_space *mapping = file->f_mapping;
98904
98905 if (!mapping->a_ops->readpage)
98906- return -ENOEXEC;
98907+ return -ENODEV;
98908 file_accessed(file);
98909 vma->vm_ops = &generic_file_vm_ops;
98910 return 0;
98911@@ -2252,6 +2252,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
98912 *pos = i_size_read(inode);
98913
98914 if (limit != RLIM_INFINITY) {
98915+	gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
98916 if (*pos >= limit) {
98917 send_sig(SIGXFSZ, current, 0);
98918 return -EFBIG;
98919diff --git a/mm/fremap.c b/mm/fremap.c
98920index 72b8fa3..c5b39f1 100644
98921--- a/mm/fremap.c
98922+++ b/mm/fremap.c
98923@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
98924 retry:
98925 vma = find_vma(mm, start);
98926
98927+#ifdef CONFIG_PAX_SEGMEXEC
98928+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
98929+ goto out;
98930+#endif
98931+
98932 /*
98933 * Make sure the vma is shared, that it supports prefaulting,
98934 * and that the remapped range is valid and fully within
98935diff --git a/mm/gup.c b/mm/gup.c
98936index cc5a9e7..d496acf 100644
98937--- a/mm/gup.c
98938+++ b/mm/gup.c
98939@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
98940 unsigned int fault_flags = 0;
98941 int ret;
98942
98943- /* For mlock, just skip the stack guard page. */
98944- if ((*flags & FOLL_MLOCK) &&
98945- (stack_guard_page_start(vma, address) ||
98946- stack_guard_page_end(vma, address + PAGE_SIZE)))
98947- return -ENOENT;
98948 if (*flags & FOLL_WRITE)
98949 fault_flags |= FAULT_FLAG_WRITE;
98950 if (nonblocking)
98951@@ -424,14 +419,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
98952 if (!(gup_flags & FOLL_FORCE))
98953 gup_flags |= FOLL_NUMA;
98954
98955- do {
98956+ while (nr_pages) {
98957 struct page *page;
98958 unsigned int foll_flags = gup_flags;
98959 unsigned int page_increm;
98960
98961 /* first iteration or cross vma bound */
98962 if (!vma || start >= vma->vm_end) {
98963- vma = find_extend_vma(mm, start);
98964+ vma = find_vma(mm, start);
98965 if (!vma && in_gate_area(mm, start)) {
98966 int ret;
98967 ret = get_gate_page(mm, start & PAGE_MASK,
98968@@ -443,7 +438,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
98969 goto next_page;
98970 }
98971
98972- if (!vma || check_vma_flags(vma, gup_flags))
98973+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
98974 return i ? : -EFAULT;
98975 if (is_vm_hugetlb_page(vma)) {
98976 i = follow_hugetlb_page(mm, vma, pages, vmas,
98977@@ -498,7 +493,7 @@ next_page:
98978 i += page_increm;
98979 start += page_increm * PAGE_SIZE;
98980 nr_pages -= page_increm;
98981- } while (nr_pages);
98982+ }
98983 return i;
98984 }
98985 EXPORT_SYMBOL(__get_user_pages);
98986diff --git a/mm/highmem.c b/mm/highmem.c
98987index b32b70c..e512eb0 100644
98988--- a/mm/highmem.c
98989+++ b/mm/highmem.c
98990@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
98991 * So no dangers, even with speculative execution.
98992 */
98993 page = pte_page(pkmap_page_table[i]);
98994+ pax_open_kernel();
98995 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
98996-
98997+ pax_close_kernel();
98998 set_page_address(page, NULL);
98999 need_flush = 1;
99000 }
99001@@ -198,9 +199,11 @@ start:
99002 }
99003 }
99004 vaddr = PKMAP_ADDR(last_pkmap_nr);
99005+
99006+ pax_open_kernel();
99007 set_pte_at(&init_mm, vaddr,
99008 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
99009-
99010+ pax_close_kernel();
99011 pkmap_count[last_pkmap_nr] = 1;
99012 set_page_address(page, (void *)vaddr);
99013
99014diff --git a/mm/hugetlb.c b/mm/hugetlb.c
99015index 7a0a73d..d583cca 100644
99016--- a/mm/hugetlb.c
99017+++ b/mm/hugetlb.c
99018@@ -2250,6 +2250,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
99019 struct hstate *h = &default_hstate;
99020 unsigned long tmp;
99021 int ret;
99022+ ctl_table_no_const hugetlb_table;
99023
99024 if (!hugepages_supported())
99025 return -ENOTSUPP;
99026@@ -2259,9 +2260,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
99027 if (write && hstate_is_gigantic(h) && !gigantic_page_supported())
99028 return -EINVAL;
99029
99030- table->data = &tmp;
99031- table->maxlen = sizeof(unsigned long);
99032- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
99033+ hugetlb_table = *table;
99034+ hugetlb_table.data = &tmp;
99035+ hugetlb_table.maxlen = sizeof(unsigned long);
99036+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
99037 if (ret)
99038 goto out;
99039
99040@@ -2306,6 +2308,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
99041 struct hstate *h = &default_hstate;
99042 unsigned long tmp;
99043 int ret;
99044+ ctl_table_no_const hugetlb_table;
99045
99046 if (!hugepages_supported())
99047 return -ENOTSUPP;
99048@@ -2315,9 +2318,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
99049 if (write && hstate_is_gigantic(h))
99050 return -EINVAL;
99051
99052- table->data = &tmp;
99053- table->maxlen = sizeof(unsigned long);
99054- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
99055+ hugetlb_table = *table;
99056+ hugetlb_table.data = &tmp;
99057+ hugetlb_table.maxlen = sizeof(unsigned long);
99058+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
99059 if (ret)
99060 goto out;
99061
99062@@ -2798,6 +2802,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
99063 return 1;
99064 }
99065
99066+#ifdef CONFIG_PAX_SEGMEXEC
99067+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
99068+{
99069+ struct mm_struct *mm = vma->vm_mm;
99070+ struct vm_area_struct *vma_m;
99071+ unsigned long address_m;
99072+ pte_t *ptep_m;
99073+
99074+ vma_m = pax_find_mirror_vma(vma);
99075+ if (!vma_m)
99076+ return;
99077+
99078+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99079+ address_m = address + SEGMEXEC_TASK_SIZE;
99080+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
99081+ get_page(page_m);
99082+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
99083+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
99084+}
99085+#endif
99086+
99087 /*
99088 * Hugetlb_cow() should be called with page lock of the original hugepage held.
99089 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
99090@@ -2915,6 +2940,11 @@ retry_avoidcopy:
99091 make_huge_pte(vma, new_page, 1));
99092 page_remove_rmap(old_page);
99093 hugepage_add_new_anon_rmap(new_page, vma, address);
99094+
99095+#ifdef CONFIG_PAX_SEGMEXEC
99096+ pax_mirror_huge_pte(vma, address, new_page);
99097+#endif
99098+
99099 /* Make the old page be freed below */
99100 new_page = old_page;
99101 }
99102@@ -3074,6 +3104,10 @@ retry:
99103 && (vma->vm_flags & VM_SHARED)));
99104 set_huge_pte_at(mm, address, ptep, new_pte);
99105
99106+#ifdef CONFIG_PAX_SEGMEXEC
99107+ pax_mirror_huge_pte(vma, address, page);
99108+#endif
99109+
99110 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
99111 /* Optimization, do the COW without a second fault */
99112 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
99113@@ -3140,6 +3174,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99114 struct hstate *h = hstate_vma(vma);
99115 struct address_space *mapping;
99116
99117+#ifdef CONFIG_PAX_SEGMEXEC
99118+ struct vm_area_struct *vma_m;
99119+#endif
99120+
99121 address &= huge_page_mask(h);
99122
99123 ptep = huge_pte_offset(mm, address);
99124@@ -3153,6 +3191,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99125 VM_FAULT_SET_HINDEX(hstate_index(h));
99126 }
99127
99128+#ifdef CONFIG_PAX_SEGMEXEC
99129+ vma_m = pax_find_mirror_vma(vma);
99130+ if (vma_m) {
99131+ unsigned long address_m;
99132+
99133+ if (vma->vm_start > vma_m->vm_start) {
99134+ address_m = address;
99135+ address -= SEGMEXEC_TASK_SIZE;
99136+ vma = vma_m;
99137+ h = hstate_vma(vma);
99138+ } else
99139+ address_m = address + SEGMEXEC_TASK_SIZE;
99140+
99141+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
99142+ return VM_FAULT_OOM;
99143+ address_m &= HPAGE_MASK;
99144+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
99145+ }
99146+#endif
99147+
99148 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
99149 if (!ptep)
99150 return VM_FAULT_OOM;
99151diff --git a/mm/internal.h b/mm/internal.h
99152index 7f22a11f..f3c207f 100644
99153--- a/mm/internal.h
99154+++ b/mm/internal.h
99155@@ -109,6 +109,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
99156 * in mm/page_alloc.c
99157 */
99158 extern void __free_pages_bootmem(struct page *page, unsigned int order);
99159+extern void free_compound_page(struct page *page);
99160 extern void prep_compound_page(struct page *page, unsigned long order);
99161 #ifdef CONFIG_MEMORY_FAILURE
99162 extern bool is_free_buddy_page(struct page *page);
99163@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
99164
99165 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
99166 unsigned long, unsigned long,
99167- unsigned long, unsigned long);
99168+ unsigned long, unsigned long) __intentional_overflow(-1);
99169
99170 extern void set_pageblock_order(void);
99171 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
99172diff --git a/mm/iov_iter.c b/mm/iov_iter.c
99173index 7b5dbd1..af0e329 100644
99174--- a/mm/iov_iter.c
99175+++ b/mm/iov_iter.c
99176@@ -173,7 +173,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
99177
99178 while (bytes) {
99179 char __user *buf = iov->iov_base + base;
99180- int copy = min(bytes, iov->iov_len - base);
99181+ size_t copy = min(bytes, iov->iov_len - base);
99182
99183 base = 0;
99184 left = __copy_from_user_inatomic(vaddr, buf, copy);
99185@@ -201,7 +201,7 @@ static size_t copy_from_user_atomic_iovec(struct page *page,
99186
99187 kaddr = kmap_atomic(page);
99188 if (likely(i->nr_segs == 1)) {
99189- int left;
99190+ size_t left;
99191 char __user *buf = i->iov->iov_base + i->iov_offset;
99192 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
99193 copied = bytes - left;
99194@@ -231,7 +231,7 @@ static void advance_iovec(struct iov_iter *i, size_t bytes)
99195 	 * zero-length segments (without overrunning the iovec).
99196 */
99197 while (bytes || unlikely(i->count && !iov->iov_len)) {
99198- int copy;
99199+ size_t copy;
99200
99201 copy = min(bytes, iov->iov_len - base);
99202 BUG_ON(!i->count || i->count < copy);
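The int-to-size_t changes in this file close a truncation hole: min() here yields a size_t, and assigning it to an int silently wraps once a segment exceeds INT_MAX, after which the small or negative `copy` can trip the BUG_ON above or corrupt the bytes accounting. A tiny userspace illustration of the wrap (LP64 assumed):

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		size_t want = 0x100000001UL; /* 4 GiB + 1 */
		int as_int = (int)want;      /* wraps to 1 on common ABIs */

		printf("size_t %zu -> int %d\n", want, as_int);
		return 0;
	}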
99203diff --git a/mm/kmemleak.c b/mm/kmemleak.c
99204index 3cda50c..032ba634 100644
99205--- a/mm/kmemleak.c
99206+++ b/mm/kmemleak.c
99207@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
99208
99209 for (i = 0; i < object->trace_len; i++) {
99210 void *ptr = (void *)object->trace[i];
99211- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
99212+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
99213 }
99214 }
99215
99216@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
99217 return -ENOMEM;
99218 }
99219
99220- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
99221+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
99222 &kmemleak_fops);
99223 if (!dentry)
99224 pr_warning("Failed to create the debugfs kmemleak file\n");
99225diff --git a/mm/maccess.c b/mm/maccess.c
99226index d53adf9..03a24bf 100644
99227--- a/mm/maccess.c
99228+++ b/mm/maccess.c
99229@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
99230 set_fs(KERNEL_DS);
99231 pagefault_disable();
99232 ret = __copy_from_user_inatomic(dst,
99233- (__force const void __user *)src, size);
99234+ (const void __force_user *)src, size);
99235 pagefault_enable();
99236 set_fs(old_fs);
99237
99238@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
99239
99240 set_fs(KERNEL_DS);
99241 pagefault_disable();
99242- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
99243+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
99244 pagefault_enable();
99245 set_fs(old_fs);
99246
99247diff --git a/mm/madvise.c b/mm/madvise.c
99248index a402f8f..f5e5daa 100644
99249--- a/mm/madvise.c
99250+++ b/mm/madvise.c
99251@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
99252 pgoff_t pgoff;
99253 unsigned long new_flags = vma->vm_flags;
99254
99255+#ifdef CONFIG_PAX_SEGMEXEC
99256+ struct vm_area_struct *vma_m;
99257+#endif
99258+
99259 switch (behavior) {
99260 case MADV_NORMAL:
99261 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
99262@@ -126,6 +130,13 @@ success:
99263 /*
99264 * vm_flags is protected by the mmap_sem held in write mode.
99265 */
99266+
99267+#ifdef CONFIG_PAX_SEGMEXEC
99268+ vma_m = pax_find_mirror_vma(vma);
99269+ if (vma_m)
99270+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
99271+#endif
99272+
99273 vma->vm_flags = new_flags;
99274
99275 out:
99276@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
99277 struct vm_area_struct **prev,
99278 unsigned long start, unsigned long end)
99279 {
99280+
99281+#ifdef CONFIG_PAX_SEGMEXEC
99282+ struct vm_area_struct *vma_m;
99283+#endif
99284+
99285 *prev = vma;
99286 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
99287 return -EINVAL;
99288@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
99289 zap_page_range(vma, start, end - start, &details);
99290 } else
99291 zap_page_range(vma, start, end - start, NULL);
99292+
99293+#ifdef CONFIG_PAX_SEGMEXEC
99294+ vma_m = pax_find_mirror_vma(vma);
99295+ if (vma_m) {
99296+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
99297+ struct zap_details details = {
99298+ .nonlinear_vma = vma_m,
99299+ .last_index = ULONG_MAX,
99300+ };
99301+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
99302+ } else
99303+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
99304+ }
99305+#endif
99306+
99307 return 0;
99308 }
99309
99310@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
99311 if (end < start)
99312 return error;
99313
99314+#ifdef CONFIG_PAX_SEGMEXEC
99315+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
99316+ if (end > SEGMEXEC_TASK_SIZE)
99317+ return error;
99318+ } else
99319+#endif
99320+
99321+ if (end > TASK_SIZE)
99322+ return error;
99323+
99324 error = 0;
99325 if (end == start)
99326 return error;
99327diff --git a/mm/memory-failure.c b/mm/memory-failure.c
99328index a013bc9..a897a14 100644
99329--- a/mm/memory-failure.c
99330+++ b/mm/memory-failure.c
99331@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
99332
99333 int sysctl_memory_failure_recovery __read_mostly = 1;
99334
99335-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
99336+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
99337
99338 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
99339
99340@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
99341 pfn, t->comm, t->pid);
99342 si.si_signo = SIGBUS;
99343 si.si_errno = 0;
99344- si.si_addr = (void *)addr;
99345+ si.si_addr = (void __user *)addr;
99346 #ifdef __ARCH_SI_TRAPNO
99347 si.si_trapno = trapno;
99348 #endif
99349@@ -791,7 +791,7 @@ static struct page_state {
99350 unsigned long res;
99351 char *msg;
99352 int (*action)(struct page *p, unsigned long pfn);
99353-} error_states[] = {
99354+} __do_const error_states[] = {
99355 { reserved, reserved, "reserved kernel", me_kernel },
99356 /*
99357 * free pages are specially detected outside this table:
99358@@ -1099,7 +1099,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99359 nr_pages = 1 << compound_order(hpage);
99360 else /* normal page or thp */
99361 nr_pages = 1;
99362- atomic_long_add(nr_pages, &num_poisoned_pages);
99363+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
99364
99365 /*
99366 * We need/can do nothing about count=0 pages.
99367@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99368 if (PageHWPoison(hpage)) {
99369 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
99370 || (p != hpage && TestSetPageHWPoison(hpage))) {
99371- atomic_long_sub(nr_pages, &num_poisoned_pages);
99372+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99373 unlock_page(hpage);
99374 return 0;
99375 }
99376@@ -1186,14 +1186,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99377 */
99378 if (!PageHWPoison(p)) {
99379 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
99380- atomic_long_sub(nr_pages, &num_poisoned_pages);
99381+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99382 put_page(hpage);
99383 res = 0;
99384 goto out;
99385 }
99386 if (hwpoison_filter(p)) {
99387 if (TestClearPageHWPoison(p))
99388- atomic_long_sub(nr_pages, &num_poisoned_pages);
99389+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99390 unlock_page(hpage);
99391 put_page(hpage);
99392 return 0;
99393@@ -1423,7 +1423,7 @@ int unpoison_memory(unsigned long pfn)
99394 return 0;
99395 }
99396 if (TestClearPageHWPoison(p))
99397- atomic_long_dec(&num_poisoned_pages);
99398+ atomic_long_dec_unchecked(&num_poisoned_pages);
99399 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
99400 return 0;
99401 }
99402@@ -1437,7 +1437,7 @@ int unpoison_memory(unsigned long pfn)
99403 */
99404 if (TestClearPageHWPoison(page)) {
99405 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
99406- atomic_long_sub(nr_pages, &num_poisoned_pages);
99407+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99408 freeit = 1;
99409 if (PageHuge(page))
99410 clear_page_hwpoison_huge_page(page);
99411@@ -1562,11 +1562,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
99412 if (PageHuge(page)) {
99413 set_page_hwpoison_huge_page(hpage);
99414 dequeue_hwpoisoned_huge_page(hpage);
99415- atomic_long_add(1 << compound_order(hpage),
99416+ atomic_long_add_unchecked(1 << compound_order(hpage),
99417 &num_poisoned_pages);
99418 } else {
99419 SetPageHWPoison(page);
99420- atomic_long_inc(&num_poisoned_pages);
99421+ atomic_long_inc_unchecked(&num_poisoned_pages);
99422 }
99423 }
99424 return ret;
99425@@ -1605,7 +1605,7 @@ static int __soft_offline_page(struct page *page, int flags)
99426 put_page(page);
99427 pr_info("soft_offline: %#lx: invalidated\n", pfn);
99428 SetPageHWPoison(page);
99429- atomic_long_inc(&num_poisoned_pages);
99430+ atomic_long_inc_unchecked(&num_poisoned_pages);
99431 return 0;
99432 }
99433
99434@@ -1656,7 +1656,7 @@ static int __soft_offline_page(struct page *page, int flags)
99435 if (!is_free_buddy_page(page))
99436 pr_info("soft offline: %#lx: page leaked\n",
99437 pfn);
99438- atomic_long_inc(&num_poisoned_pages);
99439+ atomic_long_inc_unchecked(&num_poisoned_pages);
99440 }
99441 } else {
99442 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
99443@@ -1726,11 +1726,11 @@ int soft_offline_page(struct page *page, int flags)
99444 if (PageHuge(page)) {
99445 set_page_hwpoison_huge_page(hpage);
99446 dequeue_hwpoisoned_huge_page(hpage);
99447- atomic_long_add(1 << compound_order(hpage),
99448+ atomic_long_add_unchecked(1 << compound_order(hpage),
99449 &num_poisoned_pages);
99450 } else {
99451 SetPageHWPoison(page);
99452- atomic_long_inc(&num_poisoned_pages);
99453+ atomic_long_inc_unchecked(&num_poisoned_pages);
99454 }
99455 }
99456 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
99457diff --git a/mm/memory.c b/mm/memory.c
99458index 8b44f76..66f1954 100644
99459--- a/mm/memory.c
99460+++ b/mm/memory.c
99461@@ -413,6 +413,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
99462 free_pte_range(tlb, pmd, addr);
99463 } while (pmd++, addr = next, addr != end);
99464
99465+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
99466 start &= PUD_MASK;
99467 if (start < floor)
99468 return;
99469@@ -427,6 +428,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
99470 pmd = pmd_offset(pud, start);
99471 pud_clear(pud);
99472 pmd_free_tlb(tlb, pmd, start);
99473+#endif
99474+
99475 }
99476
99477 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
99478@@ -446,6 +449,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
99479 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
99480 } while (pud++, addr = next, addr != end);
99481
99482+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
99483 start &= PGDIR_MASK;
99484 if (start < floor)
99485 return;
99486@@ -460,6 +464,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
99487 pud = pud_offset(pgd, start);
99488 pgd_clear(pgd);
99489 pud_free_tlb(tlb, pud, start);
99490+#endif
99491+
99492 }
99493
99494 /*
99495@@ -1501,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
99496 page_add_file_rmap(page);
99497 set_pte_at(mm, addr, pte, mk_pte(page, prot));
99498
99499+#ifdef CONFIG_PAX_SEGMEXEC
99500+ pax_mirror_file_pte(vma, addr, page, ptl);
99501+#endif
99502+
99503 retval = 0;
99504 pte_unmap_unlock(pte, ptl);
99505 return retval;
99506@@ -1545,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
99507 if (!page_count(page))
99508 return -EINVAL;
99509 if (!(vma->vm_flags & VM_MIXEDMAP)) {
99510+
99511+#ifdef CONFIG_PAX_SEGMEXEC
99512+ struct vm_area_struct *vma_m;
99513+#endif
99514+
99515 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
99516 BUG_ON(vma->vm_flags & VM_PFNMAP);
99517 vma->vm_flags |= VM_MIXEDMAP;
99518+
99519+#ifdef CONFIG_PAX_SEGMEXEC
99520+ vma_m = pax_find_mirror_vma(vma);
99521+ if (vma_m)
99522+ vma_m->vm_flags |= VM_MIXEDMAP;
99523+#endif
99524+
99525 }
99526 return insert_page(vma, addr, page, vma->vm_page_prot);
99527 }
99528@@ -1630,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
99529 unsigned long pfn)
99530 {
99531 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
99532+ BUG_ON(vma->vm_mirror);
99533
99534 if (addr < vma->vm_start || addr >= vma->vm_end)
99535 return -EFAULT;
99536@@ -1877,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
99537
99538 BUG_ON(pud_huge(*pud));
99539
99540- pmd = pmd_alloc(mm, pud, addr);
99541+ pmd = (mm == &init_mm) ?
99542+ pmd_alloc_kernel(mm, pud, addr) :
99543+ pmd_alloc(mm, pud, addr);
99544 if (!pmd)
99545 return -ENOMEM;
99546 do {
99547@@ -1897,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
99548 unsigned long next;
99549 int err;
99550
99551- pud = pud_alloc(mm, pgd, addr);
99552+ pud = (mm == &init_mm) ?
99553+ pud_alloc_kernel(mm, pgd, addr) :
99554+ pud_alloc(mm, pgd, addr);
99555 if (!pud)
99556 return -ENOMEM;
99557 do {
99558@@ -2019,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
99559 return ret;
99560 }
99561
99562+#ifdef CONFIG_PAX_SEGMEXEC
99563+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
99564+{
99565+ struct mm_struct *mm = vma->vm_mm;
99566+ spinlock_t *ptl;
99567+ pte_t *pte, entry;
99568+
99569+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
99570+ entry = *pte;
99571+ if (!pte_present(entry)) {
99572+ if (!pte_none(entry)) {
99573+ BUG_ON(pte_file(entry));
99574+ free_swap_and_cache(pte_to_swp_entry(entry));
99575+ pte_clear_not_present_full(mm, address, pte, 0);
99576+ }
99577+ } else {
99578+ struct page *page;
99579+
99580+ flush_cache_page(vma, address, pte_pfn(entry));
99581+ entry = ptep_clear_flush(vma, address, pte);
99582+ BUG_ON(pte_dirty(entry));
99583+ page = vm_normal_page(vma, address, entry);
99584+ if (page) {
99585+ update_hiwater_rss(mm);
99586+ if (PageAnon(page))
99587+ dec_mm_counter_fast(mm, MM_ANONPAGES);
99588+ else
99589+ dec_mm_counter_fast(mm, MM_FILEPAGES);
99590+ page_remove_rmap(page);
99591+ page_cache_release(page);
99592+ }
99593+ }
99594+ pte_unmap_unlock(pte, ptl);
99595+}
99596+
99597+/* PaX: if vma is mirrored, synchronize the mirror's PTE.
99598+ *
99599+ * The ptl of the lower mapped page is held on entry and is not released on exit
99600+ * or inside, to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc.).
99601+ */
99602+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
99603+{
99604+ struct mm_struct *mm = vma->vm_mm;
99605+ unsigned long address_m;
99606+ spinlock_t *ptl_m;
99607+ struct vm_area_struct *vma_m;
99608+ pmd_t *pmd_m;
99609+ pte_t *pte_m, entry_m;
99610+
99611+ BUG_ON(!page_m || !PageAnon(page_m));
99612+
99613+ vma_m = pax_find_mirror_vma(vma);
99614+ if (!vma_m)
99615+ return;
99616+
99617+ BUG_ON(!PageLocked(page_m));
99618+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99619+ address_m = address + SEGMEXEC_TASK_SIZE;
99620+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
99621+ pte_m = pte_offset_map(pmd_m, address_m);
99622+ ptl_m = pte_lockptr(mm, pmd_m);
99623+ if (ptl != ptl_m) {
99624+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
99625+ if (!pte_none(*pte_m))
99626+ goto out;
99627+ }
99628+
99629+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
99630+ page_cache_get(page_m);
99631+ page_add_anon_rmap(page_m, vma_m, address_m);
99632+ inc_mm_counter_fast(mm, MM_ANONPAGES);
99633+ set_pte_at(mm, address_m, pte_m, entry_m);
99634+ update_mmu_cache(vma_m, address_m, pte_m);
99635+out:
99636+ if (ptl != ptl_m)
99637+ spin_unlock(ptl_m);
99638+ pte_unmap(pte_m);
99639+ unlock_page(page_m);
99640+}
99641+
99642+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
99643+{
99644+ struct mm_struct *mm = vma->vm_mm;
99645+ unsigned long address_m;
99646+ spinlock_t *ptl_m;
99647+ struct vm_area_struct *vma_m;
99648+ pmd_t *pmd_m;
99649+ pte_t *pte_m, entry_m;
99650+
99651+ BUG_ON(!page_m || PageAnon(page_m));
99652+
99653+ vma_m = pax_find_mirror_vma(vma);
99654+ if (!vma_m)
99655+ return;
99656+
99657+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99658+ address_m = address + SEGMEXEC_TASK_SIZE;
99659+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
99660+ pte_m = pte_offset_map(pmd_m, address_m);
99661+ ptl_m = pte_lockptr(mm, pmd_m);
99662+ if (ptl != ptl_m) {
99663+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
99664+ if (!pte_none(*pte_m))
99665+ goto out;
99666+ }
99667+
99668+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
99669+ page_cache_get(page_m);
99670+ page_add_file_rmap(page_m);
99671+ inc_mm_counter_fast(mm, MM_FILEPAGES);
99672+ set_pte_at(mm, address_m, pte_m, entry_m);
99673+ update_mmu_cache(vma_m, address_m, pte_m);
99674+out:
99675+ if (ptl != ptl_m)
99676+ spin_unlock(ptl_m);
99677+ pte_unmap(pte_m);
99678+}
99679+
99680+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
99681+{
99682+ struct mm_struct *mm = vma->vm_mm;
99683+ unsigned long address_m;
99684+ spinlock_t *ptl_m;
99685+ struct vm_area_struct *vma_m;
99686+ pmd_t *pmd_m;
99687+ pte_t *pte_m, entry_m;
99688+
99689+ vma_m = pax_find_mirror_vma(vma);
99690+ if (!vma_m)
99691+ return;
99692+
99693+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99694+ address_m = address + SEGMEXEC_TASK_SIZE;
99695+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
99696+ pte_m = pte_offset_map(pmd_m, address_m);
99697+ ptl_m = pte_lockptr(mm, pmd_m);
99698+ if (ptl != ptl_m) {
99699+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
99700+ if (!pte_none(*pte_m))
99701+ goto out;
99702+ }
99703+
99704+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
99705+ set_pte_at(mm, address_m, pte_m, entry_m);
99706+out:
99707+ if (ptl != ptl_m)
99708+ spin_unlock(ptl_m);
99709+ pte_unmap(pte_m);
99710+}
99711+
99712+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
99713+{
99714+ struct page *page_m;
99715+ pte_t entry;
99716+
99717+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
99718+ goto out;
99719+
99720+ entry = *pte;
99721+ page_m = vm_normal_page(vma, address, entry);
99722+ if (!page_m)
99723+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
99724+ else if (PageAnon(page_m)) {
99725+ if (pax_find_mirror_vma(vma)) {
99726+ pte_unmap_unlock(pte, ptl);
99727+ lock_page(page_m);
99728+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
99729+ if (pte_same(entry, *pte))
99730+ pax_mirror_anon_pte(vma, address, page_m, ptl);
99731+ else
99732+ unlock_page(page_m);
99733+ }
99734+ } else
99735+ pax_mirror_file_pte(vma, address, page_m, ptl);
99736+
99737+out:
99738+ pte_unmap_unlock(pte, ptl);
99739+}
99740+#endif
99741+
99742 /*
99743 * This routine handles present pages, when users try to write
99744 * to a shared page. It is done by copying the page to a new address
99745@@ -2216,6 +2423,12 @@ gotten:
99746 */
99747 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
99748 if (likely(pte_same(*page_table, orig_pte))) {
99749+
99750+#ifdef CONFIG_PAX_SEGMEXEC
99751+ if (pax_find_mirror_vma(vma))
99752+ BUG_ON(!trylock_page(new_page));
99753+#endif
99754+
99755 if (old_page) {
99756 if (!PageAnon(old_page)) {
99757 dec_mm_counter_fast(mm, MM_FILEPAGES);
99758@@ -2267,6 +2480,10 @@ gotten:
99759 page_remove_rmap(old_page);
99760 }
99761
99762+#ifdef CONFIG_PAX_SEGMEXEC
99763+ pax_mirror_anon_pte(vma, address, new_page, ptl);
99764+#endif
99765+
99766 /* Free the old page.. */
99767 new_page = old_page;
99768 ret |= VM_FAULT_WRITE;
99769@@ -2540,6 +2757,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
99770 swap_free(entry);
99771 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
99772 try_to_free_swap(page);
99773+
99774+#ifdef CONFIG_PAX_SEGMEXEC
99775+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
99776+#endif
99777+
99778 unlock_page(page);
99779 if (page != swapcache) {
99780 /*
99781@@ -2563,6 +2785,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
99782
99783 /* No need to invalidate - it was non-present before */
99784 update_mmu_cache(vma, address, page_table);
99785+
99786+#ifdef CONFIG_PAX_SEGMEXEC
99787+ pax_mirror_anon_pte(vma, address, page, ptl);
99788+#endif
99789+
99790 unlock:
99791 pte_unmap_unlock(page_table, ptl);
99792 out:
99793@@ -2582,40 +2809,6 @@ out_release:
99794 }
99795
99796 /*
99797- * This is like a special single-page "expand_{down|up}wards()",
99798- * except we must first make sure that 'address{-|+}PAGE_SIZE'
99799- * doesn't hit another vma.
99800- */
99801-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
99802-{
99803- address &= PAGE_MASK;
99804- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
99805- struct vm_area_struct *prev = vma->vm_prev;
99806-
99807- /*
99808- * Is there a mapping abutting this one below?
99809- *
99810- * That's only ok if it's the same stack mapping
99811- * that has gotten split..
99812- */
99813- if (prev && prev->vm_end == address)
99814- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
99815-
99816- expand_downwards(vma, address - PAGE_SIZE);
99817- }
99818- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
99819- struct vm_area_struct *next = vma->vm_next;
99820-
99821- /* As VM_GROWSDOWN but s/below/above/ */
99822- if (next && next->vm_start == address + PAGE_SIZE)
99823- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
99824-
99825- expand_upwards(vma, address + PAGE_SIZE);
99826- }
99827- return 0;
99828-}
99829-
99830-/*
99831 * We enter with non-exclusive mmap_sem (to exclude vma changes,
99832 * but allow concurrent faults), and pte mapped but not yet locked.
99833 * We return with mmap_sem still held, but pte unmapped and unlocked.
99834@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
99835 unsigned long address, pte_t *page_table, pmd_t *pmd,
99836 unsigned int flags)
99837 {
99838- struct page *page;
99839+ struct page *page = NULL;
99840 spinlock_t *ptl;
99841 pte_t entry;
99842
99843- pte_unmap(page_table);
99844-
99845- /* Check if we need to add a guard page to the stack */
99846- if (check_stack_guard_page(vma, address) < 0)
99847- return VM_FAULT_SIGBUS;
99848-
99849- /* Use the zero-page for reads */
99850 if (!(flags & FAULT_FLAG_WRITE)) {
99851 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
99852 vma->vm_page_prot));
99853- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
99854+ ptl = pte_lockptr(mm, pmd);
99855+ spin_lock(ptl);
99856 if (!pte_none(*page_table))
99857 goto unlock;
99858 goto setpte;
99859 }
99860
99861 /* Allocate our own private page. */
99862+ pte_unmap(page_table);
99863+
99864 if (unlikely(anon_vma_prepare(vma)))
99865 goto oom;
99866 page = alloc_zeroed_user_highpage_movable(vma, address);
99867@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
99868 if (!pte_none(*page_table))
99869 goto release;
99870
99871+#ifdef CONFIG_PAX_SEGMEXEC
99872+ if (pax_find_mirror_vma(vma))
99873+ BUG_ON(!trylock_page(page));
99874+#endif
99875+
99876 inc_mm_counter_fast(mm, MM_ANONPAGES);
99877 page_add_new_anon_rmap(page, vma, address);
99878 setpte:
99879@@ -2675,6 +2869,12 @@ setpte:
99880
99881 /* No need to invalidate - it was non-present before */
99882 update_mmu_cache(vma, address, page_table);
99883+
99884+#ifdef CONFIG_PAX_SEGMEXEC
99885+ if (page)
99886+ pax_mirror_anon_pte(vma, address, page, ptl);
99887+#endif
99888+
99889 unlock:
99890 pte_unmap_unlock(page_table, ptl);
99891 return 0;
99892@@ -2906,6 +3106,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99893 return ret;
99894 }
99895 do_set_pte(vma, address, fault_page, pte, false, false);
99896+
99897+#ifdef CONFIG_PAX_SEGMEXEC
99898+ pax_mirror_file_pte(vma, address, fault_page, ptl);
99899+#endif
99900+
99901 unlock_page(fault_page);
99902 unlock_out:
99903 pte_unmap_unlock(pte, ptl);
99904@@ -2947,7 +3152,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99905 page_cache_release(fault_page);
99906 goto uncharge_out;
99907 }
99908+
99909+#ifdef CONFIG_PAX_SEGMEXEC
99910+ if (pax_find_mirror_vma(vma))
99911+ BUG_ON(!trylock_page(new_page));
99912+#endif
99913+
99914 do_set_pte(vma, address, new_page, pte, true, true);
99915+
99916+#ifdef CONFIG_PAX_SEGMEXEC
99917+ pax_mirror_anon_pte(vma, address, new_page, ptl);
99918+#endif
99919+
99920 pte_unmap_unlock(pte, ptl);
99921 unlock_page(fault_page);
99922 page_cache_release(fault_page);
99923@@ -2995,6 +3211,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99924 return ret;
99925 }
99926 do_set_pte(vma, address, fault_page, pte, true, false);
99927+
99928+#ifdef CONFIG_PAX_SEGMEXEC
99929+ pax_mirror_file_pte(vma, address, fault_page, ptl);
99930+#endif
99931+
99932 pte_unmap_unlock(pte, ptl);
99933
99934 if (set_page_dirty(fault_page))
99935@@ -3225,6 +3446,12 @@ static int handle_pte_fault(struct mm_struct *mm,
99936 if (flags & FAULT_FLAG_WRITE)
99937 flush_tlb_fix_spurious_fault(vma, address);
99938 }
99939+
99940+#ifdef CONFIG_PAX_SEGMEXEC
99941+ pax_mirror_pte(vma, address, pte, pmd, ptl);
99942+ return 0;
99943+#endif
99944+
99945 unlock:
99946 pte_unmap_unlock(pte, ptl);
99947 return 0;
99948@@ -3241,9 +3468,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99949 pmd_t *pmd;
99950 pte_t *pte;
99951
99952+#ifdef CONFIG_PAX_SEGMEXEC
99953+ struct vm_area_struct *vma_m;
99954+#endif
99955+
99956 if (unlikely(is_vm_hugetlb_page(vma)))
99957 return hugetlb_fault(mm, vma, address, flags);
99958
99959+#ifdef CONFIG_PAX_SEGMEXEC
99960+ vma_m = pax_find_mirror_vma(vma);
99961+ if (vma_m) {
99962+ unsigned long address_m;
99963+ pgd_t *pgd_m;
99964+ pud_t *pud_m;
99965+ pmd_t *pmd_m;
99966+
99967+ if (vma->vm_start > vma_m->vm_start) {
99968+ address_m = address;
99969+ address -= SEGMEXEC_TASK_SIZE;
99970+ vma = vma_m;
99971+ } else
99972+ address_m = address + SEGMEXEC_TASK_SIZE;
99973+
99974+ pgd_m = pgd_offset(mm, address_m);
99975+ pud_m = pud_alloc(mm, pgd_m, address_m);
99976+ if (!pud_m)
99977+ return VM_FAULT_OOM;
99978+ pmd_m = pmd_alloc(mm, pud_m, address_m);
99979+ if (!pmd_m)
99980+ return VM_FAULT_OOM;
99981+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
99982+ return VM_FAULT_OOM;
99983+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
99984+ }
99985+#endif
99986+
99987 pgd = pgd_offset(mm, address);
99988 pud = pud_alloc(mm, pgd, address);
99989 if (!pud)
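
[Editorial sketch, not part of the patch] The __handle_mm_fault() hunk just above normalises a fault in either half before the regular walk: when the faulting vma turns out to be the mirror (its vm_start is the higher of the pair), the address is shifted down by SEGMEXEC_TASK_SIZE and the fault is handled on the canonical vma, while the mirror-side pud/pmd/pte are pre-allocated so the later pax_mirror_pte() call cannot fail on a missing table. A hedged model of just the normalisation step, with heavily simplified, invented types:

/* simplified model of the address normalisation in __handle_mm_fault() */
struct seg_vma { unsigned long start; struct seg_vma *mirror; };

static void normalize_fault(struct seg_vma **vma, unsigned long *address,
			    unsigned long *address_m, unsigned long seg_size)
{
	struct seg_vma *vma_m = (*vma)->mirror;

	if ((*vma)->start > vma_m->start) {	/* we faulted on the mirror */
		*address_m = *address;		/* mirror keeps the original */
		*address -= seg_size;		/* handle it on the lower half */
		*vma = vma_m;
	} else {
		*address_m = *address + seg_size;
	}
}
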
99990@@ -3371,6 +3630,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
99991 spin_unlock(&mm->page_table_lock);
99992 return 0;
99993 }
99994+
99995+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
99996+{
99997+ pud_t *new = pud_alloc_one(mm, address);
99998+ if (!new)
99999+ return -ENOMEM;
100000+
100001+ smp_wmb(); /* See comment in __pte_alloc */
100002+
100003+ spin_lock(&mm->page_table_lock);
100004+ if (pgd_present(*pgd)) /* Another has populated it */
100005+ pud_free(mm, new);
100006+ else
100007+ pgd_populate_kernel(mm, pgd, new);
100008+ spin_unlock(&mm->page_table_lock);
100009+ return 0;
100010+}
100011 #endif /* __PAGETABLE_PUD_FOLDED */
100012
100013 #ifndef __PAGETABLE_PMD_FOLDED
100014@@ -3401,6 +3677,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
100015 spin_unlock(&mm->page_table_lock);
100016 return 0;
100017 }
100018+
100019+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
100020+{
100021+ pmd_t *new = pmd_alloc_one(mm, address);
100022+ if (!new)
100023+ return -ENOMEM;
100024+
100025+ smp_wmb(); /* See comment in __pte_alloc */
100026+
100027+ spin_lock(&mm->page_table_lock);
100028+#ifndef __ARCH_HAS_4LEVEL_HACK
100029+ if (pud_present(*pud)) /* Another has populated it */
100030+ pmd_free(mm, new);
100031+ else
100032+ pud_populate_kernel(mm, pud, new);
100033+#else
100034+ if (pgd_present(*pud)) /* Another has populated it */
100035+ pmd_free(mm, new);
100036+ else
100037+ pgd_populate_kernel(mm, pud, new);
100038+#endif /* __ARCH_HAS_4LEVEL_HACK */
100039+ spin_unlock(&mm->page_table_lock);
100040+ return 0;
100041+}
100042 #endif /* __PAGETABLE_PMD_FOLDED */
100043
100044 #if !defined(__HAVE_ARCH_GATE_AREA)
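
[Editorial sketch, not part of the patch] __pud_alloc_kernel() and __pmd_alloc_kernel() above reuse the kernel's optimistic-allocation idiom (their smp_wmb() comments point at __pte_alloc for the same trick): allocate the new table with no locks held, make its zeroed contents visible before it can be linked, then recheck under page_table_lock and throw the allocation away if another CPU won the race. The same shape in portable userspace code, with pthreads standing in for the kernel primitives:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_table;		/* models the pgd/pud slot being filled */

static int alloc_table_once(void)
{
	void *new = calloc(1, 4096);	/* models pud_alloc_one()/pmd_alloc_one() */
	if (!new)
		return -1;		/* -ENOMEM in the kernel */

	/* the kernel issues smp_wmb() here so the zeroed table is
	 * globally visible before the populate below can publish it */
	pthread_mutex_lock(&table_lock);
	if (shared_table)		/* "Another has populated it" */
		free(new);
	else
		shared_table = new;	/* models pgd_populate_kernel() */
	pthread_mutex_unlock(&table_lock);
	return 0;
}
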
100045@@ -3414,7 +3714,7 @@ static int __init gate_vma_init(void)
100046 gate_vma.vm_start = FIXADDR_USER_START;
100047 gate_vma.vm_end = FIXADDR_USER_END;
100048 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
100049- gate_vma.vm_page_prot = __P101;
100050+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
100051
100052 return 0;
100053 }
100054@@ -3548,8 +3848,8 @@ out:
100055 return ret;
100056 }
100057
100058-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
100059- void *buf, int len, int write)
100060+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
100061+ void *buf, size_t len, int write)
100062 {
100063 resource_size_t phys_addr;
100064 unsigned long prot = 0;
100065@@ -3575,8 +3875,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
100066 * Access another process' address space as given in mm. If non-NULL, use the
100067 * given task for page fault accounting.
100068 */
100069-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100070- unsigned long addr, void *buf, int len, int write)
100071+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100072+ unsigned long addr, void *buf, size_t len, int write)
100073 {
100074 struct vm_area_struct *vma;
100075 void *old_buf = buf;
100076@@ -3584,7 +3884,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100077 down_read(&mm->mmap_sem);
100078 /* ignore errors, just check how much was successfully transferred */
100079 while (len) {
100080- int bytes, ret, offset;
100081+ ssize_t bytes, ret, offset;
100082 void *maddr;
100083 struct page *page = NULL;
100084
100085@@ -3643,8 +3943,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100086 *
100087 * The caller must hold a reference on @mm.
100088 */
100089-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100090- void *buf, int len, int write)
100091+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
100092+ void *buf, size_t len, int write)
100093 {
100094 return __access_remote_vm(NULL, mm, addr, buf, len, write);
100095 }
100096@@ -3654,11 +3954,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100097 * Source/target buffer must be kernel space,
100098 * Do not walk the page table directly, use get_user_pages
100099 */
100100-int access_process_vm(struct task_struct *tsk, unsigned long addr,
100101- void *buf, int len, int write)
100102+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
100103+ void *buf, size_t len, int write)
100104 {
100105 struct mm_struct *mm;
100106- int ret;
100107+ ssize_t ret;
100108
100109 mm = get_task_mm(tsk);
100110 if (!mm)
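
[Editorial sketch, not part of the patch] The remaining mm/memory.c hunks widen the byte counts of generic_access_phys() and the access_*_vm() family from int to size_t/ssize_t. With a 32-bit int, any request of 2 GiB or more goes negative before a single byte is copied; with size_t/ssize_t the count stays well defined on 64-bit kernels. A two-printf illustration of the truncation the change avoids (behaviour shown is for the common LP64 model):

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t len = 0x80000000UL;		/* a 2 GiB request */
	int as_int = (int)len;			/* the old parameter type */
	ssize_t as_ssize = (ssize_t)len;	/* the new one */

	printf("int: %d\n", as_int);		/* -2147483648 on LP64 */
	printf("ssize_t: %zd\n", as_ssize);	/* 2147483648 */
	return 0;
}
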
100111diff --git a/mm/mempolicy.c b/mm/mempolicy.c
100112index 8f5330d..b41914b 100644
100113--- a/mm/mempolicy.c
100114+++ b/mm/mempolicy.c
100115@@ -750,6 +750,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
100116 unsigned long vmstart;
100117 unsigned long vmend;
100118
100119+#ifdef CONFIG_PAX_SEGMEXEC
100120+ struct vm_area_struct *vma_m;
100121+#endif
100122+
100123 vma = find_vma(mm, start);
100124 if (!vma || vma->vm_start > start)
100125 return -EFAULT;
100126@@ -793,6 +797,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
100127 err = vma_replace_policy(vma, new_pol);
100128 if (err)
100129 goto out;
100130+
100131+#ifdef CONFIG_PAX_SEGMEXEC
100132+ vma_m = pax_find_mirror_vma(vma);
100133+ if (vma_m) {
100134+ err = vma_replace_policy(vma_m, new_pol);
100135+ if (err)
100136+ goto out;
100137+ }
100138+#endif
100139+
100140 }
100141
100142 out:
100143@@ -1225,6 +1239,17 @@ static long do_mbind(unsigned long start, unsigned long len,
100144
100145 if (end < start)
100146 return -EINVAL;
100147+
100148+#ifdef CONFIG_PAX_SEGMEXEC
100149+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
100150+ if (end > SEGMEXEC_TASK_SIZE)
100151+ return -EINVAL;
100152+ } else
100153+#endif
100154+
100155+ if (end > TASK_SIZE)
100156+ return -EINVAL;
100157+
100158 if (end == start)
100159 return 0;
100160
100161@@ -1450,8 +1475,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
100162 */
100163 tcred = __task_cred(task);
100164 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
100165- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
100166- !capable(CAP_SYS_NICE)) {
100167+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
100168 rcu_read_unlock();
100169 err = -EPERM;
100170 goto out_put;
100171@@ -1482,6 +1506,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
100172 goto out;
100173 }
100174
100175+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
100176+ if (mm != current->mm &&
100177+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
100178+ mmput(mm);
100179+ err = -EPERM;
100180+ goto out;
100181+ }
100182+#endif
100183+
100184 err = do_migrate_pages(mm, old, new,
100185 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
100186
100187diff --git a/mm/migrate.c b/mm/migrate.c
100188index be6dbf9..febb8ec 100644
100189--- a/mm/migrate.c
100190+++ b/mm/migrate.c
100191@@ -1506,8 +1506,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
100192 */
100193 tcred = __task_cred(task);
100194 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
100195- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
100196- !capable(CAP_SYS_NICE)) {
100197+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
100198 rcu_read_unlock();
100199 err = -EPERM;
100200 goto out;
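
[Editorial sketch, not part of the patch] The matching hunks in mm/mempolicy.c and mm/migrate.c above tighten the same permission test: the clause that accepted a bare real-uid match (cred->uid == tcred->uid) is dropped, so migrate_pages() and move_pages() now let a caller act on another task only when the caller's euid matches the target's suid or uid, the caller's uid matches the target's suid, or the caller holds CAP_SYS_NICE. The resulting predicate, spelled out as a hedged model (struct and function names invented):

#include <stdbool.h>

struct ids { unsigned uid, euid, suid; };

static bool may_move_pages(const struct ids *c /* caller */,
			   const struct ids *t /* target */,
			   bool cap_sys_nice)
{
	return c->euid == t->suid || c->euid == t->uid ||
	       c->uid  == t->suid || cap_sys_nice;
}
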
100201diff --git a/mm/mlock.c b/mm/mlock.c
100202index b1eb536..091d154 100644
100203--- a/mm/mlock.c
100204+++ b/mm/mlock.c
100205@@ -14,6 +14,7 @@
100206 #include <linux/pagevec.h>
100207 #include <linux/mempolicy.h>
100208 #include <linux/syscalls.h>
100209+#include <linux/security.h>
100210 #include <linux/sched.h>
100211 #include <linux/export.h>
100212 #include <linux/rmap.h>
100213@@ -606,7 +607,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
100214 {
100215 unsigned long nstart, end, tmp;
100216 struct vm_area_struct * vma, * prev;
100217- int error;
100218+ int error = 0;
100219
100220 VM_BUG_ON(start & ~PAGE_MASK);
100221 VM_BUG_ON(len != PAGE_ALIGN(len));
100222@@ -615,6 +616,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
100223 return -EINVAL;
100224 if (end == start)
100225 return 0;
100226+ if (end > TASK_SIZE)
100227+ return -EINVAL;
100228+
100229 vma = find_vma(current->mm, start);
100230 if (!vma || vma->vm_start > start)
100231 return -ENOMEM;
100232@@ -626,6 +630,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
100233 for (nstart = start ; ; ) {
100234 vm_flags_t newflags;
100235
100236+#ifdef CONFIG_PAX_SEGMEXEC
100237+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
100238+ break;
100239+#endif
100240+
100241 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
100242
100243 newflags = vma->vm_flags & ~VM_LOCKED;
100244@@ -739,6 +748,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
100245 locked += current->mm->locked_vm;
100246
100247 /* check against resource limits */
100248+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
100249 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
100250 error = do_mlock(start, len, 1);
100251
100252@@ -776,6 +786,11 @@ static int do_mlockall(int flags)
100253 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
100254 vm_flags_t newflags;
100255
100256+#ifdef CONFIG_PAX_SEGMEXEC
100257+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
100258+ break;
100259+#endif
100260+
100261 newflags = vma->vm_flags & ~VM_LOCKED;
100262 if (flags & MCL_CURRENT)
100263 newflags |= VM_LOCKED;
100264@@ -807,8 +822,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
100265 lock_limit >>= PAGE_SHIFT;
100266
100267 ret = -ENOMEM;
100268+
100269+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
100270+
100271 down_write(&current->mm->mmap_sem);
100272-
100273 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
100274 capable(CAP_IPC_LOCK))
100275 ret = do_mlockall(flags);
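
[Editorial sketch, not part of the patch] Each path above now reports its would-be locked footprint to gr_learn_resource() before the limit test, letting grsecurity's learning mode record the RLIMIT_MEMLOCK a policy would need: mlock() reports the bytes already locked plus the new request, mlockall() the task's whole total_vm. A one-function model of the mlock() quantity, names invented:

/* bytes handed to gr_learn_resource() on the mlock() path above */
static unsigned long memlock_demand(unsigned long locked_vm_pages,
				    unsigned long len, unsigned int page_shift)
{
	return (locked_vm_pages << page_shift) + len;
}
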
100276diff --git a/mm/mmap.c b/mm/mmap.c
100277index 129b847..fbed804 100644
100278--- a/mm/mmap.c
100279+++ b/mm/mmap.c
100280@@ -40,6 +40,7 @@
100281 #include <linux/notifier.h>
100282 #include <linux/memory.h>
100283 #include <linux/printk.h>
100284+#include <linux/random.h>
100285
100286 #include <asm/uaccess.h>
100287 #include <asm/cacheflush.h>
100288@@ -56,6 +57,16 @@
100289 #define arch_rebalance_pgtables(addr, len) (addr)
100290 #endif
100291
100292+static inline void verify_mm_writelocked(struct mm_struct *mm)
100293+{
100294+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
100295+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
100296+ up_read(&mm->mmap_sem);
100297+ BUG();
100298+ }
100299+#endif
100300+}
100301+
100302 static void unmap_region(struct mm_struct *mm,
100303 struct vm_area_struct *vma, struct vm_area_struct *prev,
100304 unsigned long start, unsigned long end);
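
[Editorial sketch, not part of the patch] verify_mm_writelocked(), hoisted to the top of mm/mmap.c here (the weaker CONFIG_DEBUG_VM-only version is deleted further down), asserts write ownership of mmap_sem by inverting rwsem semantics: down_read_trylock() can only succeed while no writer holds the lock, so success proves the caller broke the locking contract, and the helper drops the read lock it accidentally took and BUGs. A userspace model with POSIX rwlocks:

#include <assert.h>
#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* call only with mmap_sem held for writing */
static void verify_writelocked(void)
{
	if (pthread_rwlock_tryrdlock(&mmap_sem) == 0) {
		/* a read lock was grantable, so no writer was inside */
		pthread_rwlock_unlock(&mmap_sem);
		assert(!"mmap_sem not held for write");
	}
}
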
100305@@ -75,16 +86,25 @@ static void unmap_region(struct mm_struct *mm,
100306 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
100307 *
100308 */
100309-pgprot_t protection_map[16] = {
100310+pgprot_t protection_map[16] __read_only = {
100311 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
100312 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
100313 };
100314
100315-pgprot_t vm_get_page_prot(unsigned long vm_flags)
100316+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
100317 {
100318- return __pgprot(pgprot_val(protection_map[vm_flags &
100319+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
100320 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
100321 pgprot_val(arch_vm_get_page_prot(vm_flags)));
100322+
100323+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
100324+ if (!(__supported_pte_mask & _PAGE_NX) &&
100325+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
100326+ (vm_flags & (VM_READ | VM_WRITE)))
100327+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
100328+#endif
100329+
100330+ return prot;
100331 }
100332 EXPORT_SYMBOL(vm_get_page_prot);
100333
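
[Editorial sketch, not part of the patch] vm_get_page_prot() above still derives the base protection from protection_map, indexed by the low four flag bits; the PaX addition only strips the execute bits afterwards, when a non-NX x86-32 CPU is emulating PAGEEXEC. A quick sketch of the table index (the VM_* values below match the kernel's):

#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

int main(void)
{
	unsigned long vm_flags = VM_READ | VM_EXEC;	/* a private r-x mapping */
	unsigned long idx = vm_flags &
			    (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);

	/* protection_map[5] is __P101, the entry the gate_vma hunk above
	 * used to hard-code before switching to vm_get_page_prot() */
	printf("protection_map index: %lu\n", idx);	/* prints 5 */
	return 0;
}
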
100334@@ -94,6 +114,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
100335 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
100336 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
100337 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
100338+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
100339 /*
100340 * Make sure vm_committed_as in one cacheline and not cacheline shared with
100341 * other variables. It can be updated by several CPUs frequently.
100342@@ -250,6 +271,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
100343 struct vm_area_struct *next = vma->vm_next;
100344
100345 might_sleep();
100346+ BUG_ON(vma->vm_mirror);
100347 if (vma->vm_ops && vma->vm_ops->close)
100348 vma->vm_ops->close(vma);
100349 if (vma->vm_file)
100350@@ -294,6 +316,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
100351 * not page aligned -Ram Gupta
100352 */
100353 rlim = rlimit(RLIMIT_DATA);
100354+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
100355+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
100356+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
100357+ rlim = 4096 * PAGE_SIZE;
100358+#endif
100359+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
100360 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
100361 (mm->end_data - mm->start_data) > rlim)
100362 goto out;
100363@@ -944,6 +972,12 @@ static int
100364 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
100365 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
100366 {
100367+
100368+#ifdef CONFIG_PAX_SEGMEXEC
100369+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
100370+ return 0;
100371+#endif
100372+
100373 if (is_mergeable_vma(vma, file, vm_flags) &&
100374 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
100375 if (vma->vm_pgoff == vm_pgoff)
100376@@ -963,6 +997,12 @@ static int
100377 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
100378 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
100379 {
100380+
100381+#ifdef CONFIG_PAX_SEGMEXEC
100382+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
100383+ return 0;
100384+#endif
100385+
100386 if (is_mergeable_vma(vma, file, vm_flags) &&
100387 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
100388 pgoff_t vm_pglen;
100389@@ -1005,13 +1045,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
100390 struct vm_area_struct *vma_merge(struct mm_struct *mm,
100391 struct vm_area_struct *prev, unsigned long addr,
100392 unsigned long end, unsigned long vm_flags,
100393- struct anon_vma *anon_vma, struct file *file,
100394+ struct anon_vma *anon_vma, struct file *file,
100395 pgoff_t pgoff, struct mempolicy *policy)
100396 {
100397 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
100398 struct vm_area_struct *area, *next;
100399 int err;
100400
100401+#ifdef CONFIG_PAX_SEGMEXEC
100402+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
100403+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
100404+
100405+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
100406+#endif
100407+
100408 /*
100409 * We later require that vma->vm_flags == vm_flags,
100410 * so this tests vma->vm_flags & VM_SPECIAL, too.
100411@@ -1027,6 +1074,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
100412 if (next && next->vm_end == end) /* cases 6, 7, 8 */
100413 next = next->vm_next;
100414
100415+#ifdef CONFIG_PAX_SEGMEXEC
100416+ if (prev)
100417+ prev_m = pax_find_mirror_vma(prev);
100418+ if (area)
100419+ area_m = pax_find_mirror_vma(area);
100420+ if (next)
100421+ next_m = pax_find_mirror_vma(next);
100422+#endif
100423+
100424 /*
100425 * Can it merge with the predecessor?
100426 */
100427@@ -1046,9 +1102,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
100428 /* cases 1, 6 */
100429 err = vma_adjust(prev, prev->vm_start,
100430 next->vm_end, prev->vm_pgoff, NULL);
100431- } else /* cases 2, 5, 7 */
100432+
100433+#ifdef CONFIG_PAX_SEGMEXEC
100434+ if (!err && prev_m)
100435+ err = vma_adjust(prev_m, prev_m->vm_start,
100436+ next_m->vm_end, prev_m->vm_pgoff, NULL);
100437+#endif
100438+
100439+ } else { /* cases 2, 5, 7 */
100440 err = vma_adjust(prev, prev->vm_start,
100441 end, prev->vm_pgoff, NULL);
100442+
100443+#ifdef CONFIG_PAX_SEGMEXEC
100444+ if (!err && prev_m)
100445+ err = vma_adjust(prev_m, prev_m->vm_start,
100446+ end_m, prev_m->vm_pgoff, NULL);
100447+#endif
100448+
100449+ }
100450 if (err)
100451 return NULL;
100452 khugepaged_enter_vma_merge(prev);
100453@@ -1062,12 +1133,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
100454 mpol_equal(policy, vma_policy(next)) &&
100455 can_vma_merge_before(next, vm_flags,
100456 anon_vma, file, pgoff+pglen)) {
100457- if (prev && addr < prev->vm_end) /* case 4 */
100458+ if (prev && addr < prev->vm_end) { /* case 4 */
100459 err = vma_adjust(prev, prev->vm_start,
100460 addr, prev->vm_pgoff, NULL);
100461- else /* cases 3, 8 */
100462+
100463+#ifdef CONFIG_PAX_SEGMEXEC
100464+ if (!err && prev_m)
100465+ err = vma_adjust(prev_m, prev_m->vm_start,
100466+ addr_m, prev_m->vm_pgoff, NULL);
100467+#endif
100468+
100469+ } else { /* cases 3, 8 */
100470 err = vma_adjust(area, addr, next->vm_end,
100471 next->vm_pgoff - pglen, NULL);
100472+
100473+#ifdef CONFIG_PAX_SEGMEXEC
100474+ if (!err && area_m)
100475+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
100476+ next_m->vm_pgoff - pglen, NULL);
100477+#endif
100478+
100479+ }
100480 if (err)
100481 return NULL;
100482 khugepaged_enter_vma_merge(area);
100483@@ -1176,8 +1262,10 @@ none:
100484 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
100485 struct file *file, long pages)
100486 {
100487- const unsigned long stack_flags
100488- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
100489+
100490+#ifdef CONFIG_PAX_RANDMMAP
100491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
100492+#endif
100493
100494 mm->total_vm += pages;
100495
100496@@ -1185,7 +1273,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
100497 mm->shared_vm += pages;
100498 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
100499 mm->exec_vm += pages;
100500- } else if (flags & stack_flags)
100501+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
100502 mm->stack_vm += pages;
100503 }
100504 #endif /* CONFIG_PROC_FS */
100505@@ -1215,6 +1303,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
100506 locked += mm->locked_vm;
100507 lock_limit = rlimit(RLIMIT_MEMLOCK);
100508 lock_limit >>= PAGE_SHIFT;
100509+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
100510 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
100511 return -EAGAIN;
100512 }
100513@@ -1241,7 +1330,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
100514 * (the exception is when the underlying filesystem is noexec
100515 * mounted, in which case we dont add PROT_EXEC.)
100516 */
100517- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
100518+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
100519 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
100520 prot |= PROT_EXEC;
100521
100522@@ -1267,7 +1356,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
100523 /* Obtain the address to map to. we verify (or select) it and ensure
100524 * that it represents a valid section of the address space.
100525 */
100526- addr = get_unmapped_area(file, addr, len, pgoff, flags);
100527+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
100528 if (addr & ~PAGE_MASK)
100529 return addr;
100530
100531@@ -1278,6 +1367,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
100532 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
100533 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
100534
100535+#ifdef CONFIG_PAX_MPROTECT
100536+ if (mm->pax_flags & MF_PAX_MPROTECT) {
100537+
100538+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
100539+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
100540+ mm->binfmt->handle_mmap)
100541+ mm->binfmt->handle_mmap(file);
100542+#endif
100543+
100544+#ifndef CONFIG_PAX_MPROTECT_COMPAT
100545+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
100546+ gr_log_rwxmmap(file);
100547+
100548+#ifdef CONFIG_PAX_EMUPLT
100549+ vm_flags &= ~VM_EXEC;
100550+#else
100551+ return -EPERM;
100552+#endif
100553+
100554+ }
100555+
100556+ if (!(vm_flags & VM_EXEC))
100557+ vm_flags &= ~VM_MAYEXEC;
100558+#else
100559+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
100560+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
100561+#endif
100562+ else
100563+ vm_flags &= ~VM_MAYWRITE;
100564+ }
100565+#endif
100566+
100567+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
100568+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
100569+ vm_flags &= ~VM_PAGEEXEC;
100570+#endif
100571+
100572 if (flags & MAP_LOCKED)
100573 if (!can_do_mlock())
100574 return -EPERM;
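
[Editorial sketch, not part of the patch] The CONFIG_PAX_MPROTECT block above is the W^X gate for new mappings: a request for both VM_WRITE and VM_EXEC is logged and either refused outright or, under CONFIG_PAX_EMUPLT, demoted to non-executable, and any mapping created without VM_EXEC also loses VM_MAYEXEC so it can never be flipped executable by a later mprotect(). A hedged model of the non-MPROTECT_COMPAT branch (wx_filter() is an invented name; the VM_* values match the kernel's):

#include <stdbool.h>

#define VM_WRITE   0x02UL
#define VM_EXEC    0x04UL
#define VM_MAYEXEC 0x40UL

static long wx_filter(unsigned long vm_flags, bool emuplt)
{
	if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
		if (!emuplt)
			return -1;		/* -EPERM in the kernel */
		vm_flags &= ~VM_EXEC;		/* EMUPLT: demote instead */
	}
	if (!(vm_flags & VM_EXEC))
		vm_flags &= ~VM_MAYEXEC;	/* lock out future mprotect(+x) */
	return (long)vm_flags;
}
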
100575@@ -1365,6 +1491,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
100576 vm_flags |= VM_NORESERVE;
100577 }
100578
100579+ if (!gr_acl_handle_mmap(file, prot))
100580+ return -EACCES;
100581+
100582 addr = mmap_region(file, addr, len, vm_flags, pgoff);
100583 if (!IS_ERR_VALUE(addr) &&
100584 ((vm_flags & VM_LOCKED) ||
100585@@ -1458,7 +1587,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
100586 vm_flags_t vm_flags = vma->vm_flags;
100587
100588 /* If it was private or non-writable, the write bit is already clear */
100589- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
100590+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
100591 return 0;
100592
100593 /* The backer wishes to know when pages are first written to? */
100594@@ -1504,7 +1633,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
100595 struct rb_node **rb_link, *rb_parent;
100596 unsigned long charged = 0;
100597
100598+#ifdef CONFIG_PAX_SEGMEXEC
100599+ struct vm_area_struct *vma_m = NULL;
100600+#endif
100601+
100602+ /*
100603+ * mm->mmap_sem is required to protect against another thread
100604+ * changing the mappings in case we sleep.
100605+ */
100606+ verify_mm_writelocked(mm);
100607+
100608 /* Check against address space limit. */
100609+
100610+#ifdef CONFIG_PAX_RANDMMAP
100611+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
100612+#endif
100613+
100614 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
100615 unsigned long nr_pages;
100616
100617@@ -1523,11 +1667,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
100618
100619 /* Clear old maps */
100620 error = -ENOMEM;
100621-munmap_back:
100622 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
100623 if (do_munmap(mm, addr, len))
100624 return -ENOMEM;
100625- goto munmap_back;
100626+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
100627 }
100628
100629 /*
100630@@ -1558,6 +1701,16 @@ munmap_back:
100631 goto unacct_error;
100632 }
100633
100634+#ifdef CONFIG_PAX_SEGMEXEC
100635+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
100636+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
100637+ if (!vma_m) {
100638+ error = -ENOMEM;
100639+ goto free_vma;
100640+ }
100641+ }
100642+#endif
100643+
100644 vma->vm_mm = mm;
100645 vma->vm_start = addr;
100646 vma->vm_end = addr + len;
100647@@ -1577,6 +1730,13 @@ munmap_back:
100648 if (error)
100649 goto unmap_and_free_vma;
100650
100651+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
100652+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
100653+ vma->vm_flags |= VM_PAGEEXEC;
100654+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
100655+ }
100656+#endif
100657+
100658 /* Can addr have changed??
100659 *
100660 * Answer: Yes, several device drivers can do it in their
100661@@ -1610,6 +1770,12 @@ munmap_back:
100662 }
100663
100664 vma_link(mm, vma, prev, rb_link, rb_parent);
100665+
100666+#ifdef CONFIG_PAX_SEGMEXEC
100667+ if (vma_m)
100668+ BUG_ON(pax_mirror_vma(vma_m, vma));
100669+#endif
100670+
100671 /* Once vma denies write, undo our temporary denial count */
100672 if (vm_flags & VM_DENYWRITE)
100673 allow_write_access(file);
100674@@ -1618,6 +1784,7 @@ out:
100675 perf_event_mmap(vma);
100676
100677 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
100678+ track_exec_limit(mm, addr, addr + len, vm_flags);
100679 if (vm_flags & VM_LOCKED) {
100680 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
100681 vma == get_gate_vma(current->mm)))
100682@@ -1650,6 +1817,12 @@ unmap_and_free_vma:
100683 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
100684 charged = 0;
100685 free_vma:
100686+
100687+#ifdef CONFIG_PAX_SEGMEXEC
100688+ if (vma_m)
100689+ kmem_cache_free(vm_area_cachep, vma_m);
100690+#endif
100691+
100692 kmem_cache_free(vm_area_cachep, vma);
100693 unacct_error:
100694 if (charged)
100695@@ -1657,7 +1830,63 @@ unacct_error:
100696 return error;
100697 }
100698
100699-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
100700+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
100701+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
100702+{
100703+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
100704+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
100705+
100706+ return 0;
100707+}
100708+#endif
100709+
100710+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
100711+{
100712+ if (!vma) {
100713+#ifdef CONFIG_STACK_GROWSUP
100714+ if (addr > sysctl_heap_stack_gap)
100715+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
100716+ else
100717+ vma = find_vma(current->mm, 0);
100718+ if (vma && (vma->vm_flags & VM_GROWSUP))
100719+ return false;
100720+#endif
100721+ return true;
100722+ }
100723+
100724+ if (addr + len > vma->vm_start)
100725+ return false;
100726+
100727+ if (vma->vm_flags & VM_GROWSDOWN)
100728+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
100729+#ifdef CONFIG_STACK_GROWSUP
100730+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
100731+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
100732+#endif
100733+ else if (offset)
100734+ return offset <= vma->vm_start - addr - len;
100735+
100736+ return true;
100737+}
100738+
100739+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
100740+{
100741+ if (vma->vm_start < len)
100742+ return -ENOMEM;
100743+
100744+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
100745+ if (offset <= vma->vm_start - len)
100746+ return vma->vm_start - len - offset;
100747+ else
100748+ return -ENOMEM;
100749+ }
100750+
100751+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
100752+ return vma->vm_start - len - sysctl_heap_stack_gap;
100753+ return -ENOMEM;
100754+}
100755+
100756+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
100757 {
100758 /*
100759 * We implement the search by looking for an rbtree node that
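
[Editorial sketch, not part of the patch] check_heap_stack_gap() above is the policy core behind the new sysctl_heap_stack_gap (64 KiB by default, per the hunk earlier in this file): a candidate range may not end within the gap of a downward-growing stack, and gr_rand_threadstack_offset() additionally shifts MAP_STACK requests by a random 1 to 256 pages, i.e. 4 KiB to 1 MiB with 4 KiB pages. A worked check of the VM_GROWSDOWN branch, with gap_ok() an invented name:

#include <stdbool.h>
#include <stdio.h>

#define HEAP_STACK_GAP 0x10000UL	/* sysctl_heap_stack_gap default */

/* models the VM_GROWSDOWN branch of check_heap_stack_gap() */
static bool gap_ok(unsigned long addr, unsigned long len,
		   unsigned long stack_start)
{
	if (addr + len > stack_start)	/* would overlap the stack vma */
		return false;
	return HEAP_STACK_GAP <= stack_start - addr - len;
}

int main(void)
{
	unsigned long stack = 0xbffe0000UL;	/* bottom of a grows-down stack */

	printf("%d\n", gap_ok(stack - 0x20000, 0x10000, stack)); /* 1: 64 KiB left */
	printf("%d\n", gap_ok(stack - 0x18000, 0x10000, stack)); /* 0: 32 KiB left */
	return 0;
}
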
100760@@ -1705,11 +1934,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
100761 }
100762 }
100763
100764- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
100765+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
100766 check_current:
100767 /* Check if current node has a suitable gap */
100768 if (gap_start > high_limit)
100769 return -ENOMEM;
100770+
100771+ if (gap_end - gap_start > info->threadstack_offset)
100772+ gap_start += info->threadstack_offset;
100773+ else
100774+ gap_start = gap_end;
100775+
100776+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
100777+ if (gap_end - gap_start > sysctl_heap_stack_gap)
100778+ gap_start += sysctl_heap_stack_gap;
100779+ else
100780+ gap_start = gap_end;
100781+ }
100782+ if (vma->vm_flags & VM_GROWSDOWN) {
100783+ if (gap_end - gap_start > sysctl_heap_stack_gap)
100784+ gap_end -= sysctl_heap_stack_gap;
100785+ else
100786+ gap_end = gap_start;
100787+ }
100788 if (gap_end >= low_limit && gap_end - gap_start >= length)
100789 goto found;
100790
100791@@ -1759,7 +2006,7 @@ found:
100792 return gap_start;
100793 }
100794
100795-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
100796+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
100797 {
100798 struct mm_struct *mm = current->mm;
100799 struct vm_area_struct *vma;
100800@@ -1813,6 +2060,24 @@ check_current:
100801 gap_end = vma->vm_start;
100802 if (gap_end < low_limit)
100803 return -ENOMEM;
100804+
100805+ if (gap_end - gap_start > info->threadstack_offset)
100806+ gap_end -= info->threadstack_offset;
100807+ else
100808+ gap_end = gap_start;
100809+
100810+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
100811+ if (gap_end - gap_start > sysctl_heap_stack_gap)
100812+ gap_start += sysctl_heap_stack_gap;
100813+ else
100814+ gap_start = gap_end;
100815+ }
100816+ if (vma->vm_flags & VM_GROWSDOWN) {
100817+ if (gap_end - gap_start > sysctl_heap_stack_gap)
100818+ gap_end -= sysctl_heap_stack_gap;
100819+ else
100820+ gap_end = gap_start;
100821+ }
100822 if (gap_start <= high_limit && gap_end - gap_start >= length)
100823 goto found;
100824
100825@@ -1876,6 +2141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
100826 struct mm_struct *mm = current->mm;
100827 struct vm_area_struct *vma;
100828 struct vm_unmapped_area_info info;
100829+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
100830
100831 if (len > TASK_SIZE - mmap_min_addr)
100832 return -ENOMEM;
100833@@ -1883,11 +2149,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
100834 if (flags & MAP_FIXED)
100835 return addr;
100836
100837+#ifdef CONFIG_PAX_RANDMMAP
100838+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
100839+#endif
100840+
100841 if (addr) {
100842 addr = PAGE_ALIGN(addr);
100843 vma = find_vma(mm, addr);
100844 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
100845- (!vma || addr + len <= vma->vm_start))
100846+ check_heap_stack_gap(vma, addr, len, offset))
100847 return addr;
100848 }
100849
100850@@ -1896,6 +2166,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
100851 info.low_limit = mm->mmap_base;
100852 info.high_limit = TASK_SIZE;
100853 info.align_mask = 0;
100854+ info.threadstack_offset = offset;
100855 return vm_unmapped_area(&info);
100856 }
100857 #endif
100858@@ -1914,6 +2185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
100859 struct mm_struct *mm = current->mm;
100860 unsigned long addr = addr0;
100861 struct vm_unmapped_area_info info;
100862+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
100863
100864 /* requested length too big for entire address space */
100865 if (len > TASK_SIZE - mmap_min_addr)
100866@@ -1922,12 +2194,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
100867 if (flags & MAP_FIXED)
100868 return addr;
100869
100870+#ifdef CONFIG_PAX_RANDMMAP
100871+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
100872+#endif
100873+
100874 /* requesting a specific address */
100875 if (addr) {
100876 addr = PAGE_ALIGN(addr);
100877 vma = find_vma(mm, addr);
100878 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
100879- (!vma || addr + len <= vma->vm_start))
100880+ check_heap_stack_gap(vma, addr, len, offset))
100881 return addr;
100882 }
100883
100884@@ -1936,6 +2212,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
100885 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
100886 info.high_limit = mm->mmap_base;
100887 info.align_mask = 0;
100888+ info.threadstack_offset = offset;
100889 addr = vm_unmapped_area(&info);
100890
100891 /*
100892@@ -1948,6 +2225,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
100893 VM_BUG_ON(addr != -ENOMEM);
100894 info.flags = 0;
100895 info.low_limit = TASK_UNMAPPED_BASE;
100896+
100897+#ifdef CONFIG_PAX_RANDMMAP
100898+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100899+ info.low_limit += mm->delta_mmap;
100900+#endif
100901+
100902 info.high_limit = TASK_SIZE;
100903 addr = vm_unmapped_area(&info);
100904 }
100905@@ -2048,6 +2331,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
100906 return vma;
100907 }
100908
100909+#ifdef CONFIG_PAX_SEGMEXEC
100910+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
100911+{
100912+ struct vm_area_struct *vma_m;
100913+
100914+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
100915+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
100916+ BUG_ON(vma->vm_mirror);
100917+ return NULL;
100918+ }
100919+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
100920+ vma_m = vma->vm_mirror;
100921+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
100922+ BUG_ON(vma->vm_file != vma_m->vm_file);
100923+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
100924+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
100925+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
100926+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
100927+ return vma_m;
100928+}
100929+#endif
100930+
100931 /*
100932 * Verify that the stack growth is acceptable and
100933 * update accounting. This is shared with both the
100934@@ -2064,6 +2369,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
100935 return -ENOMEM;
100936
100937 /* Stack limit test */
100938+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
100939 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
100940 return -ENOMEM;
100941
100942@@ -2074,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
100943 locked = mm->locked_vm + grow;
100944 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
100945 limit >>= PAGE_SHIFT;
100946+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
100947 if (locked > limit && !capable(CAP_IPC_LOCK))
100948 return -ENOMEM;
100949 }
100950@@ -2103,37 +2410,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
100951 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
100952 * vma is the last one with address > vma->vm_end. Have to extend vma.
100953 */
100954+#ifndef CONFIG_IA64
100955+static
100956+#endif
100957 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
100958 {
100959 int error;
100960+ bool locknext;
100961
100962 if (!(vma->vm_flags & VM_GROWSUP))
100963 return -EFAULT;
100964
100965+ /* Also guard against wrapping around to address 0. */
100966+ if (address < PAGE_ALIGN(address+1))
100967+ address = PAGE_ALIGN(address+1);
100968+ else
100969+ return -ENOMEM;
100970+
100971 /*
100972 * We must make sure the anon_vma is allocated
100973 * so that the anon_vma locking is not a noop.
100974 */
100975 if (unlikely(anon_vma_prepare(vma)))
100976 return -ENOMEM;
100977+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
100978+ if (locknext && anon_vma_prepare(vma->vm_next))
100979+ return -ENOMEM;
100980 vma_lock_anon_vma(vma);
100981+ if (locknext)
100982+ vma_lock_anon_vma(vma->vm_next);
100983
100984 /*
100985 * vma->vm_start/vm_end cannot change under us because the caller
100986 * is required to hold the mmap_sem in read mode. We need the
100987- * anon_vma lock to serialize against concurrent expand_stacks.
100988- * Also guard against wrapping around to address 0.
100989+ * anon_vma locks to serialize against concurrent expand_stacks
100990+ * and expand_upwards.
100991 */
100992- if (address < PAGE_ALIGN(address+4))
100993- address = PAGE_ALIGN(address+4);
100994- else {
100995- vma_unlock_anon_vma(vma);
100996- return -ENOMEM;
100997- }
100998 error = 0;
100999
101000 /* Somebody else might have raced and expanded it already */
101001- if (address > vma->vm_end) {
101002+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
101003+ error = -ENOMEM;
101004+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
101005 unsigned long size, grow;
101006
101007 size = address - vma->vm_start;
101008@@ -2168,6 +2486,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
101009 }
101010 }
101011 }
101012+ if (locknext)
101013+ vma_unlock_anon_vma(vma->vm_next);
101014 vma_unlock_anon_vma(vma);
101015 khugepaged_enter_vma_merge(vma);
101016 validate_mm(vma->vm_mm);
101017@@ -2182,6 +2502,8 @@ int expand_downwards(struct vm_area_struct *vma,
101018 unsigned long address)
101019 {
101020 int error;
101021+ bool lockprev = false;
101022+ struct vm_area_struct *prev;
101023
101024 /*
101025 * We must make sure the anon_vma is allocated
101026@@ -2195,6 +2517,15 @@ int expand_downwards(struct vm_area_struct *vma,
101027 if (error)
101028 return error;
101029
101030+ prev = vma->vm_prev;
101031+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
101032+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
101033+#endif
101034+ if (lockprev && anon_vma_prepare(prev))
101035+ return -ENOMEM;
101036+ if (lockprev)
101037+ vma_lock_anon_vma(prev);
101038+
101039 vma_lock_anon_vma(vma);
101040
101041 /*
101042@@ -2204,9 +2535,17 @@ int expand_downwards(struct vm_area_struct *vma,
101043 */
101044
101045 /* Somebody else might have raced and expanded it already */
101046- if (address < vma->vm_start) {
101047+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
101048+ error = -ENOMEM;
101049+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
101050 unsigned long size, grow;
101051
101052+#ifdef CONFIG_PAX_SEGMEXEC
101053+ struct vm_area_struct *vma_m;
101054+
101055+ vma_m = pax_find_mirror_vma(vma);
101056+#endif
101057+
101058 size = vma->vm_end - address;
101059 grow = (vma->vm_start - address) >> PAGE_SHIFT;
101060
101061@@ -2231,13 +2570,27 @@ int expand_downwards(struct vm_area_struct *vma,
101062 vma->vm_pgoff -= grow;
101063 anon_vma_interval_tree_post_update_vma(vma);
101064 vma_gap_update(vma);
101065+
101066+#ifdef CONFIG_PAX_SEGMEXEC
101067+ if (vma_m) {
101068+ anon_vma_interval_tree_pre_update_vma(vma_m);
101069+ vma_m->vm_start -= grow << PAGE_SHIFT;
101070+ vma_m->vm_pgoff -= grow;
101071+ anon_vma_interval_tree_post_update_vma(vma_m);
101072+ vma_gap_update(vma_m);
101073+ }
101074+#endif
101075+
101076 spin_unlock(&vma->vm_mm->page_table_lock);
101077
101078+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
101079 perf_event_mmap(vma);
101080 }
101081 }
101082 }
101083 vma_unlock_anon_vma(vma);
101084+ if (lockprev)
101085+ vma_unlock_anon_vma(prev);
101086 khugepaged_enter_vma_merge(vma);
101087 validate_mm(vma->vm_mm);
101088 return error;
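
[Editorial sketch, not part of the patch] The expand_upwards()/expand_downwards() changes above enforce the same gap at growth time, not just at mapping time: before a stack may grow it must stay sysctl_heap_stack_gap away from any accessible (VM_READ|VM_WRITE|VM_EXEC) neighbour, and the neighbour's anon_vma is locked as well where the two expansions could race. A one-function model of the downward guard:

/* models the guard in expand_downwards(): growth is refused when the
 * new stack start would come within the gap of an accessible prev vma */
static int may_grow_down(unsigned long new_start, unsigned long prev_end,
			 unsigned long heap_stack_gap)
{
	return new_start - prev_end >= heap_stack_gap ? 0 : -1; /* -ENOMEM */
}
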
101089@@ -2335,6 +2688,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
101090 do {
101091 long nrpages = vma_pages(vma);
101092
101093+#ifdef CONFIG_PAX_SEGMEXEC
101094+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
101095+ vma = remove_vma(vma);
101096+ continue;
101097+ }
101098+#endif
101099+
101100 if (vma->vm_flags & VM_ACCOUNT)
101101 nr_accounted += nrpages;
101102 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
101103@@ -2379,6 +2739,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
101104 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
101105 vma->vm_prev = NULL;
101106 do {
101107+
101108+#ifdef CONFIG_PAX_SEGMEXEC
101109+ if (vma->vm_mirror) {
101110+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
101111+ vma->vm_mirror->vm_mirror = NULL;
101112+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
101113+ vma->vm_mirror = NULL;
101114+ }
101115+#endif
101116+
101117 vma_rb_erase(vma, &mm->mm_rb);
101118 mm->map_count--;
101119 tail_vma = vma;
101120@@ -2406,14 +2776,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101121 struct vm_area_struct *new;
101122 int err = -ENOMEM;
101123
101124+#ifdef CONFIG_PAX_SEGMEXEC
101125+ struct vm_area_struct *vma_m, *new_m = NULL;
101126+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
101127+#endif
101128+
101129 if (is_vm_hugetlb_page(vma) && (addr &
101130 ~(huge_page_mask(hstate_vma(vma)))))
101131 return -EINVAL;
101132
101133+#ifdef CONFIG_PAX_SEGMEXEC
101134+ vma_m = pax_find_mirror_vma(vma);
101135+#endif
101136+
101137 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
101138 if (!new)
101139 goto out_err;
101140
101141+#ifdef CONFIG_PAX_SEGMEXEC
101142+ if (vma_m) {
101143+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
101144+ if (!new_m) {
101145+ kmem_cache_free(vm_area_cachep, new);
101146+ goto out_err;
101147+ }
101148+ }
101149+#endif
101150+
101151 /* most fields are the same, copy all, and then fixup */
101152 *new = *vma;
101153
101154@@ -2426,6 +2815,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101155 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
101156 }
101157
101158+#ifdef CONFIG_PAX_SEGMEXEC
101159+ if (vma_m) {
101160+ *new_m = *vma_m;
101161+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
101162+ new_m->vm_mirror = new;
101163+ new->vm_mirror = new_m;
101164+
101165+ if (new_below)
101166+ new_m->vm_end = addr_m;
101167+ else {
101168+ new_m->vm_start = addr_m;
101169+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
101170+ }
101171+ }
101172+#endif
101173+
101174 err = vma_dup_policy(vma, new);
101175 if (err)
101176 goto out_free_vma;
101177@@ -2445,6 +2850,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101178 else
101179 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
101180
101181+#ifdef CONFIG_PAX_SEGMEXEC
101182+ if (!err && vma_m) {
101183+ struct mempolicy *pol = vma_policy(new);
101184+
101185+ if (anon_vma_clone(new_m, vma_m))
101186+ goto out_free_mpol;
101187+
101188+ mpol_get(pol);
101189+ set_vma_policy(new_m, pol);
101190+
101191+ if (new_m->vm_file)
101192+ get_file(new_m->vm_file);
101193+
101194+ if (new_m->vm_ops && new_m->vm_ops->open)
101195+ new_m->vm_ops->open(new_m);
101196+
101197+ if (new_below)
101198+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
101199+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
101200+ else
101201+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
101202+
101203+ if (err) {
101204+ if (new_m->vm_ops && new_m->vm_ops->close)
101205+ new_m->vm_ops->close(new_m);
101206+ if (new_m->vm_file)
101207+ fput(new_m->vm_file);
101208+ mpol_put(pol);
101209+ }
101210+ }
101211+#endif
101212+
101213 /* Success. */
101214 if (!err)
101215 return 0;
101216@@ -2454,10 +2891,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101217 new->vm_ops->close(new);
101218 if (new->vm_file)
101219 fput(new->vm_file);
101220- unlink_anon_vmas(new);
101221 out_free_mpol:
101222 mpol_put(vma_policy(new));
101223 out_free_vma:
101224+
101225+#ifdef CONFIG_PAX_SEGMEXEC
101226+ if (new_m) {
101227+ unlink_anon_vmas(new_m);
101228+ kmem_cache_free(vm_area_cachep, new_m);
101229+ }
101230+#endif
101231+
101232+ unlink_anon_vmas(new);
101233 kmem_cache_free(vm_area_cachep, new);
101234 out_err:
101235 return err;
101236@@ -2470,6 +2915,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101237 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
101238 unsigned long addr, int new_below)
101239 {
101240+
101241+#ifdef CONFIG_PAX_SEGMEXEC
101242+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
101243+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
101244+ if (mm->map_count >= sysctl_max_map_count-1)
101245+ return -ENOMEM;
101246+ } else
101247+#endif
101248+
101249 if (mm->map_count >= sysctl_max_map_count)
101250 return -ENOMEM;
101251
101252@@ -2481,11 +2935,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
101253 * work. This now handles partial unmappings.
101254 * Jeremy Fitzhardinge <jeremy@goop.org>
101255 */
101256+#ifdef CONFIG_PAX_SEGMEXEC
101257 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101258 {
101259+ int ret = __do_munmap(mm, start, len);
101260+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
101261+ return ret;
101262+
101263+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
101264+}
101265+
101266+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101267+#else
101268+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101269+#endif
101270+{
101271 unsigned long end;
101272 struct vm_area_struct *vma, *prev, *last;
101273
101274+ /*
101275+ * mm->mmap_sem is required to protect against another thread
101276+ * changing the mappings in case we sleep.
101277+ */
101278+ verify_mm_writelocked(mm);
101279+
101280 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
101281 return -EINVAL;
101282
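
[Editorial sketch, not part of the patch] Under SEGMEXEC, do_munmap() above becomes a thin wrapper: the real work moves into __do_munmap(), invoked once for [start, start+len) and, if that call succeeded and the flag is set, once more for the same range shifted up by SEGMEXEC_TASK_SIZE, so the executable twin can never outlive the data mapping. A small illustration of the two ranges one munmap() tears down, again assuming the 1.5 GiB i386 split:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed i386 split, as before */

int main(void)
{
	unsigned long start = 0x08048000UL, len = 0x4000UL;

	printf("canonical: [%#lx, %#lx)\n", start, start + len);
	printf("mirror:    [%#lx, %#lx)\n", start + SEGMEXEC_TASK_SIZE,
	       start + SEGMEXEC_TASK_SIZE + len);
	return 0;
}
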
101283@@ -2560,6 +3033,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101284 /* Fix up all other VM information */
101285 remove_vma_list(mm, vma);
101286
101287+ track_exec_limit(mm, start, end, 0UL);
101288+
101289 return 0;
101290 }
101291
101292@@ -2568,6 +3043,13 @@ int vm_munmap(unsigned long start, size_t len)
101293 int ret;
101294 struct mm_struct *mm = current->mm;
101295
101296+
101297+#ifdef CONFIG_PAX_SEGMEXEC
101298+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
101299+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
101300+ return -EINVAL;
101301+#endif
101302+
101303 down_write(&mm->mmap_sem);
101304 ret = do_munmap(mm, start, len);
101305 up_write(&mm->mmap_sem);
101306@@ -2581,16 +3063,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
101307 return vm_munmap(addr, len);
101308 }
101309
101310-static inline void verify_mm_writelocked(struct mm_struct *mm)
101311-{
101312-#ifdef CONFIG_DEBUG_VM
101313- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
101314- WARN_ON(1);
101315- up_read(&mm->mmap_sem);
101316- }
101317-#endif
101318-}
101319-
101320 /*
101321 * this is really a simplified "do_mmap". it only handles
101322 * anonymous maps. eventually we may be able to do some
101323@@ -2604,6 +3076,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101324 struct rb_node ** rb_link, * rb_parent;
101325 pgoff_t pgoff = addr >> PAGE_SHIFT;
101326 int error;
101327+ unsigned long charged;
101328
101329 len = PAGE_ALIGN(len);
101330 if (!len)
101331@@ -2611,10 +3084,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101332
101333 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
101334
101335+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
101336+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
101337+ flags &= ~VM_EXEC;
101338+
101339+#ifdef CONFIG_PAX_MPROTECT
101340+ if (mm->pax_flags & MF_PAX_MPROTECT)
101341+ flags &= ~VM_MAYEXEC;
101342+#endif
101343+
101344+ }
101345+#endif
101346+
101347 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
101348 if (error & ~PAGE_MASK)
101349 return error;
101350
101351+ charged = len >> PAGE_SHIFT;
101352+
101353 error = mlock_future_check(mm, mm->def_flags, len);
101354 if (error)
101355 return error;
101356@@ -2628,21 +3115,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101357 /*
101358 * Clear old maps. this also does some error checking for us
101359 */
101360- munmap_back:
101361 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
101362 if (do_munmap(mm, addr, len))
101363 return -ENOMEM;
101364- goto munmap_back;
101365+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
101366 }
101367
101368 /* Check against address space limits *after* clearing old maps... */
101369- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
101370+ if (!may_expand_vm(mm, charged))
101371 return -ENOMEM;
101372
101373 if (mm->map_count > sysctl_max_map_count)
101374 return -ENOMEM;
101375
101376- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
101377+ if (security_vm_enough_memory_mm(mm, charged))
101378 return -ENOMEM;
101379
101380 /* Can we just expand an old private anonymous mapping? */
101381@@ -2656,7 +3142,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101382 */
101383 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101384 if (!vma) {
101385- vm_unacct_memory(len >> PAGE_SHIFT);
101386+ vm_unacct_memory(charged);
101387 return -ENOMEM;
101388 }
101389
101390@@ -2670,10 +3156,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101391 vma_link(mm, vma, prev, rb_link, rb_parent);
101392 out:
101393 perf_event_mmap(vma);
101394- mm->total_vm += len >> PAGE_SHIFT;
101395+ mm->total_vm += charged;
101396 if (flags & VM_LOCKED)
101397- mm->locked_vm += (len >> PAGE_SHIFT);
101398+ mm->locked_vm += charged;
101399 vma->vm_flags |= VM_SOFTDIRTY;
101400+ track_exec_limit(mm, addr, addr + len, flags);
101401 return addr;
101402 }
101403
101404@@ -2735,6 +3222,7 @@ void exit_mmap(struct mm_struct *mm)
101405 while (vma) {
101406 if (vma->vm_flags & VM_ACCOUNT)
101407 nr_accounted += vma_pages(vma);
101408+ vma->vm_mirror = NULL;
101409 vma = remove_vma(vma);
101410 }
101411 vm_unacct_memory(nr_accounted);
101412@@ -2752,6 +3240,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
101413 struct vm_area_struct *prev;
101414 struct rb_node **rb_link, *rb_parent;
101415
101416+#ifdef CONFIG_PAX_SEGMEXEC
101417+ struct vm_area_struct *vma_m = NULL;
101418+#endif
101419+
101420+ if (security_mmap_addr(vma->vm_start))
101421+ return -EPERM;
101422+
101423 /*
101424 * The vm_pgoff of a purely anonymous vma should be irrelevant
101425 * until its first write fault, when page's anon_vma and index
101426@@ -2775,7 +3270,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
101427 security_vm_enough_memory_mm(mm, vma_pages(vma)))
101428 return -ENOMEM;
101429
101430+#ifdef CONFIG_PAX_SEGMEXEC
101431+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
101432+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101433+ if (!vma_m)
101434+ return -ENOMEM;
101435+ }
101436+#endif
101437+
101438 vma_link(mm, vma, prev, rb_link, rb_parent);
101439+
101440+#ifdef CONFIG_PAX_SEGMEXEC
101441+ if (vma_m)
101442+ BUG_ON(pax_mirror_vma(vma_m, vma));
101443+#endif
101444+
101445 return 0;
101446 }
101447
101448@@ -2794,6 +3303,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
101449 struct rb_node **rb_link, *rb_parent;
101450 bool faulted_in_anon_vma = true;
101451
101452+ BUG_ON(vma->vm_mirror);
101453+
101454 /*
101455 * If anonymous vma has not yet been faulted, update new pgoff
101456 * to match new location, to increase its chance of merging.
101457@@ -2858,6 +3369,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
101458 return NULL;
101459 }
101460
101461+#ifdef CONFIG_PAX_SEGMEXEC
101462+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
101463+{
101464+ struct vm_area_struct *prev_m;
101465+ struct rb_node **rb_link_m, *rb_parent_m;
101466+ struct mempolicy *pol_m;
101467+
101468+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
101469+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
101470+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
101471+ *vma_m = *vma;
101472+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
101473+ if (anon_vma_clone(vma_m, vma))
101474+ return -ENOMEM;
101475+ pol_m = vma_policy(vma_m);
101476+ mpol_get(pol_m);
101477+ set_vma_policy(vma_m, pol_m);
101478+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
101479+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
101480+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
101481+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
101482+ if (vma_m->vm_file)
101483+ get_file(vma_m->vm_file);
101484+ if (vma_m->vm_ops && vma_m->vm_ops->open)
101485+ vma_m->vm_ops->open(vma_m);
101486+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
101487+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
101488+ vma_m->vm_mirror = vma;
101489+ vma->vm_mirror = vma_m;
101490+ return 0;
101491+}
101492+#endif
101493+
101494 /*
101495 * Return true if the calling process may expand its vm space by the passed
101496 * number of pages
101497@@ -2869,6 +3413,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
101498
101499 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
101500
101501+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
101502 if (cur + npages > lim)
101503 return 0;
101504 return 1;
101505@@ -2951,6 +3496,22 @@ static struct vm_area_struct *__install_special_mapping(
101506 vma->vm_start = addr;
101507 vma->vm_end = addr + len;
101508
101509+#ifdef CONFIG_PAX_MPROTECT
101510+ if (mm->pax_flags & MF_PAX_MPROTECT) {
101511+#ifndef CONFIG_PAX_MPROTECT_COMPAT
101512+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
101513+ return ERR_PTR(-EPERM);
101514+ if (!(vm_flags & VM_EXEC))
101515+ vm_flags &= ~VM_MAYEXEC;
101516+#else
101517+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
101518+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
101519+#endif
101520+ else
101521+ vm_flags &= ~VM_MAYWRITE;
101522+ }
101523+#endif
101524+
101525 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
101526 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
101527
101528diff --git a/mm/mprotect.c b/mm/mprotect.c
101529index c43d557..0b7ccd2 100644
101530--- a/mm/mprotect.c
101531+++ b/mm/mprotect.c
101532@@ -24,10 +24,18 @@
101533 #include <linux/migrate.h>
101534 #include <linux/perf_event.h>
101535 #include <linux/ksm.h>
101536+#include <linux/sched/sysctl.h>
101537+
101538+#ifdef CONFIG_PAX_MPROTECT
101539+#include <linux/elf.h>
101540+#include <linux/binfmts.h>
101541+#endif
101542+
101543 #include <asm/uaccess.h>
101544 #include <asm/pgtable.h>
101545 #include <asm/cacheflush.h>
101546 #include <asm/tlbflush.h>
101547+#include <asm/mmu_context.h>
101548
101549 #ifndef pgprot_modify
101550 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
101551@@ -256,6 +264,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
101552 return pages;
101553 }
101554
101555+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
101556+/* called while holding the mmap semaphore for writing, except during stack expansion */
101557+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
101558+{
101559+ unsigned long oldlimit, newlimit = 0UL;
101560+
101561+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
101562+ return;
101563+
101564+ spin_lock(&mm->page_table_lock);
101565+ oldlimit = mm->context.user_cs_limit;
101566+ if ((prot & VM_EXEC) && oldlimit < end)
101567+ /* USER_CS limit moved up */
101568+ newlimit = end;
101569+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
101570+ /* USER_CS limit moved down */
101571+ newlimit = start;
101572+
101573+ if (newlimit) {
101574+ mm->context.user_cs_limit = newlimit;
101575+
101576+#ifdef CONFIG_SMP
101577+ wmb();
101578+ cpus_clear(mm->context.cpu_user_cs_mask);
101579+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
101580+#endif
101581+
101582+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
101583+ }
101584+ spin_unlock(&mm->page_table_lock);
101585+ if (newlimit == end) {
101586+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
101587+
101588+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
101589+ if (is_vm_hugetlb_page(vma))
101590+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
101591+ else
101592+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
101593+ }
101594+}
101595+#endif
101596+
101597 int
101598 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
101599 unsigned long start, unsigned long end, unsigned long newflags)
101600@@ -268,11 +318,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
101601 int error;
101602 int dirty_accountable = 0;
101603
101604+#ifdef CONFIG_PAX_SEGMEXEC
101605+ struct vm_area_struct *vma_m = NULL;
101606+ unsigned long start_m, end_m;
101607+
101608+ start_m = start + SEGMEXEC_TASK_SIZE;
101609+ end_m = end + SEGMEXEC_TASK_SIZE;
101610+#endif
101611+
101612 if (newflags == oldflags) {
101613 *pprev = vma;
101614 return 0;
101615 }
101616
101617+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
101618+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
101619+
101620+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
101621+ return -ENOMEM;
101622+
101623+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
101624+ return -ENOMEM;
101625+ }
101626+
101627 /*
101628 * If we make a private mapping writable we increase our commit;
101629 * but (without finer accounting) cannot reduce our commit if we
101630@@ -289,6 +357,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
101631 }
101632 }
101633
101634+#ifdef CONFIG_PAX_SEGMEXEC
101635+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
101636+ if (start != vma->vm_start) {
101637+ error = split_vma(mm, vma, start, 1);
101638+ if (error)
101639+ goto fail;
101640+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
101641+ *pprev = (*pprev)->vm_next;
101642+ }
101643+
101644+ if (end != vma->vm_end) {
101645+ error = split_vma(mm, vma, end, 0);
101646+ if (error)
101647+ goto fail;
101648+ }
101649+
101650+ if (pax_find_mirror_vma(vma)) {
101651+ error = __do_munmap(mm, start_m, end_m - start_m);
101652+ if (error)
101653+ goto fail;
101654+ } else {
101655+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101656+ if (!vma_m) {
101657+ error = -ENOMEM;
101658+ goto fail;
101659+ }
101660+ vma->vm_flags = newflags;
101661+ error = pax_mirror_vma(vma_m, vma);
101662+ if (error) {
101663+ vma->vm_flags = oldflags;
101664+ goto fail;
101665+ }
101666+ }
101667+ }
101668+#endif
101669+
101670 /*
101671 * First try to merge with previous and/or next vma.
101672 */
101673@@ -319,9 +423,21 @@ success:
101674 * vm_flags and vm_page_prot are protected by the mmap_sem
101675 * held in write mode.
101676 */
101677+
101678+#ifdef CONFIG_PAX_SEGMEXEC
101679+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
101680+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
101681+#endif
101682+
101683 vma->vm_flags = newflags;
101684+
101685+#ifdef CONFIG_PAX_MPROTECT
101686+ if (mm->binfmt && mm->binfmt->handle_mprotect)
101687+ mm->binfmt->handle_mprotect(vma, newflags);
101688+#endif
101689+
101690 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
101691- vm_get_page_prot(newflags));
101692+ vm_get_page_prot(vma->vm_flags));
101693
101694 if (vma_wants_writenotify(vma)) {
101695 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
101696@@ -360,6 +476,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
101697 end = start + len;
101698 if (end <= start)
101699 return -ENOMEM;
101700+
101701+#ifdef CONFIG_PAX_SEGMEXEC
101702+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
101703+ if (end > SEGMEXEC_TASK_SIZE)
101704+ return -EINVAL;
101705+ } else
101706+#endif
101707+
101708+ if (end > TASK_SIZE)
101709+ return -EINVAL;
101710+
101711 if (!arch_validate_prot(prot))
101712 return -EINVAL;
101713
101714@@ -367,7 +494,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
101715 /*
101716 * Does the application expect PROT_READ to imply PROT_EXEC:
101717 */
101718- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
101719+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
101720 prot |= PROT_EXEC;
101721
101722 vm_flags = calc_vm_prot_bits(prot);
101723@@ -399,6 +526,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
101724 if (start > vma->vm_start)
101725 prev = vma;
101726
101727+#ifdef CONFIG_PAX_MPROTECT
101728+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
101729+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
101730+#endif
101731+
101732 for (nstart = start ; ; ) {
101733 unsigned long newflags;
101734
101735@@ -409,6 +541,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
101736
101737 /* newflags >> 4 shifts the VM_MAY% bits into the VM_% positions */
101738 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
101739+ if (prot & (PROT_WRITE | PROT_EXEC))
101740+ gr_log_rwxmprotect(vma);
101741+
101742+ error = -EACCES;
101743+ goto out;
101744+ }
101745+
101746+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
101747 error = -EACCES;
101748 goto out;
101749 }
101750@@ -423,6 +563,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
101751 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
101752 if (error)
101753 goto out;
101754+
101755+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
101756+
101757 nstart = tmp;
101758
101759 if (nstart < prev->vm_end)
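
The user_cs_limit bookkeeping in track_exec_limit() above reduces to a small decision function; here is a standalone model with the page_table_lock, the SMP cpu mask handling, and the segment reload stripped out (0 meaning "leave the limit alone"):

    #include <assert.h>

    static unsigned long new_cs_limit(unsigned long oldlimit,
                                      unsigned long start, unsigned long end,
                                      int exec)
    {
        if (exec && oldlimit < end)
            return end;    /* executable range extends past the old limit */
        if (!exec && start < oldlimit && oldlimit <= end)
            return start;  /* old limit fell inside a now non-exec range */
        return 0;          /* unchanged */
    }

    int main(void)
    {
        /* mapping an exec region above the limit raises it */
        assert(new_cs_limit(0x1000, 0x2000, 0x3000, 1) == 0x3000);
        /* dropping exec on the range containing the limit lowers it */
        assert(new_cs_limit(0x3000, 0x2000, 0x3000, 0) == 0x2000);
        /* anything else leaves it alone */
        assert(new_cs_limit(0x3000, 0x1000, 0x2000, 1) == 0);
        return 0;
    }
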
101760diff --git a/mm/mremap.c b/mm/mremap.c
101761index 05f1180..c3cde48 100644
101762--- a/mm/mremap.c
101763+++ b/mm/mremap.c
101764@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
101765 continue;
101766 pte = ptep_get_and_clear(mm, old_addr, old_pte);
101767 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
101768+
101769+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
101770+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
101771+ pte = pte_exprotect(pte);
101772+#endif
101773+
101774 pte = move_soft_dirty_pte(pte);
101775 set_pte_at(mm, new_addr, new_pte, pte);
101776 }
101777@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
101778 if (is_vm_hugetlb_page(vma))
101779 goto Einval;
101780
101781+#ifdef CONFIG_PAX_SEGMEXEC
101782+ if (pax_find_mirror_vma(vma))
101783+ goto Einval;
101784+#endif
101785+
101786 /* We can't remap across vm area boundaries */
101787 if (old_len > vma->vm_end - addr)
101788 goto Efault;
101789@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
101790 unsigned long ret = -EINVAL;
101791 unsigned long charged = 0;
101792 unsigned long map_flags;
101793+ unsigned long pax_task_size = TASK_SIZE;
101794
101795 if (new_addr & ~PAGE_MASK)
101796 goto out;
101797
101798- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
101799+#ifdef CONFIG_PAX_SEGMEXEC
101800+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
101801+ pax_task_size = SEGMEXEC_TASK_SIZE;
101802+#endif
101803+
101804+ pax_task_size -= PAGE_SIZE;
101805+
101806+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
101807 goto out;
101808
101809 /* Check if the location we're moving into overlaps the
101810 * old location at all, and fail if it does.
101811 */
101812- if ((new_addr <= addr) && (new_addr+new_len) > addr)
101813- goto out;
101814-
101815- if ((addr <= new_addr) && (addr+old_len) > new_addr)
101816+ if (addr + old_len > new_addr && new_addr + new_len > addr)
101817 goto out;
101818
101819 ret = do_munmap(mm, new_addr, new_len);
101820@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
101821 unsigned long ret = -EINVAL;
101822 unsigned long charged = 0;
101823 bool locked = false;
101824+ unsigned long pax_task_size = TASK_SIZE;
101825
101826 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
101827 return ret;
101828@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
101829 if (!new_len)
101830 return ret;
101831
101832+#ifdef CONFIG_PAX_SEGMEXEC
101833+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
101834+ pax_task_size = SEGMEXEC_TASK_SIZE;
101835+#endif
101836+
101837+ pax_task_size -= PAGE_SIZE;
101838+
101839+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
101840+ old_len > pax_task_size || addr > pax_task_size-old_len)
101841+ return ret;
101842+
101843 down_write(&current->mm->mmap_sem);
101844
101845 if (flags & MREMAP_FIXED) {
101846@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
101847 new_addr = addr;
101848 }
101849 ret = addr;
101850+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
101851 goto out;
101852 }
101853 }
101854@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
101855 goto out;
101856 }
101857
101858+ map_flags = vma->vm_flags;
101859 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
101860+ if (!(ret & ~PAGE_MASK)) {
101861+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
101862+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
101863+ }
101864 }
101865 out:
101866 if (ret & ~PAGE_MASK)
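
The mremap_to() change above folds two conditionals into the usual half-open interval overlap test: [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect iff each range begins before the other ends. A quick standalone check of the equivalence:

    #include <assert.h>

    static int overlaps(unsigned long a, unsigned long alen,
                        unsigned long b, unsigned long blen)
    {
        return a + alen > b && b + blen > a;
    }

    int main(void)
    {
        assert(overlaps(0x1000, 0x2000, 0x2000, 0x2000));   /* partial overlap */
        assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000));  /* merely adjacent */
        assert(overlaps(0x1000, 0x3000, 0x2000, 0x1000));   /* containment */
        return 0;
    }
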
101867diff --git a/mm/nommu.c b/mm/nommu.c
101868index 4a852f6..4371a6b 100644
101869--- a/mm/nommu.c
101870+++ b/mm/nommu.c
101871@@ -70,7 +70,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
101872 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
101873 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
101874 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
101875-int heap_stack_gap = 0;
101876
101877 atomic_long_t mmap_pages_allocated;
101878
101879@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
101880 EXPORT_SYMBOL(find_vma);
101881
101882 /*
101883- * find a VMA
101884- * - we don't extend stack VMAs under NOMMU conditions
101885- */
101886-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
101887-{
101888- return find_vma(mm, addr);
101889-}
101890-
101891-/*
101892 * expand a stack to a given address
101893 * - not supported under NOMMU conditions
101894 */
101895@@ -1572,6 +1562,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
101896
101897 /* most fields are the same, copy all, and then fixup */
101898 *new = *vma;
101899+ INIT_LIST_HEAD(&new->anon_vma_chain);
101900 *region = *vma->vm_region;
101901 new->vm_region = region;
101902
101903@@ -2007,8 +1998,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
101904 }
101905 EXPORT_SYMBOL(generic_file_remap_pages);
101906
101907-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
101908- unsigned long addr, void *buf, int len, int write)
101909+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
101910+ unsigned long addr, void *buf, size_t len, int write)
101911 {
101912 struct vm_area_struct *vma;
101913
101914@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
101915 *
101916 * The caller must hold a reference on @mm.
101917 */
101918-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
101919- void *buf, int len, int write)
101920+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
101921+ void *buf, size_t len, int write)
101922 {
101923 return __access_remote_vm(NULL, mm, addr, buf, len, write);
101924 }
101925@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
101926 * Access another process' address space.
101927 * - source/target buffer must be kernel space
101928 */
101929-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
101930+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
101931 {
101932 struct mm_struct *mm;
101933
101934diff --git a/mm/page-writeback.c b/mm/page-writeback.c
101935index e0c9430..3c6bf79 100644
101936--- a/mm/page-writeback.c
101937+++ b/mm/page-writeback.c
101938@@ -667,7 +667,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
101939 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
101940 * - the bdi dirty thresh drops quickly due to change of JBOD workload
101941 */
101942-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
101943+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
101944 unsigned long thresh,
101945 unsigned long bg_thresh,
101946 unsigned long dirty,
101947diff --git a/mm/page_alloc.c b/mm/page_alloc.c
101948index ef44ad7..1056bc7 100644
101949--- a/mm/page_alloc.c
101950+++ b/mm/page_alloc.c
101951@@ -61,6 +61,7 @@
101952 #include <linux/page-debug-flags.h>
101953 #include <linux/hugetlb.h>
101954 #include <linux/sched/rt.h>
101955+#include <linux/random.h>
101956
101957 #include <asm/sections.h>
101958 #include <asm/tlbflush.h>
101959@@ -357,7 +358,7 @@ out:
101960 * This usage means that zero-order pages may not be compound.
101961 */
101962
101963-static void free_compound_page(struct page *page)
101964+void free_compound_page(struct page *page)
101965 {
101966 __free_pages_ok(page, compound_order(page));
101967 }
101968@@ -745,6 +746,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
101969 int i;
101970 int bad = 0;
101971
101972+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101973+ unsigned long index = 1UL << order;
101974+#endif
101975+
101976 trace_mm_page_free(page, order);
101977 kmemcheck_free_shadow(page, order);
101978
101979@@ -761,6 +766,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
101980 debug_check_no_obj_freed(page_address(page),
101981 PAGE_SIZE << order);
101982 }
101983+
101984+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101985+ for (; index; --index)
101986+ sanitize_highpage(page + index - 1);
101987+#endif
101988+
101989 arch_free_page(page, order);
101990 kernel_map_pages(page, 1 << order, 0);
101991
101992@@ -784,6 +795,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
101993 local_irq_restore(flags);
101994 }
101995
101996+#ifdef CONFIG_PAX_LATENT_ENTROPY
101997+bool __meminitdata extra_latent_entropy;
101998+
101999+static int __init setup_pax_extra_latent_entropy(char *str)
102000+{
102001+ extra_latent_entropy = true;
102002+ return 0;
102003+}
102004+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
102005+
102006+volatile u64 latent_entropy __latent_entropy;
102007+EXPORT_SYMBOL(latent_entropy);
102008+#endif
102009+
102010 void __init __free_pages_bootmem(struct page *page, unsigned int order)
102011 {
102012 unsigned int nr_pages = 1 << order;
102013@@ -799,6 +824,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
102014 __ClearPageReserved(p);
102015 set_page_count(p, 0);
102016
102017+#ifdef CONFIG_PAX_LATENT_ENTROPY
102018+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
102019+ u64 hash = 0;
102020+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
102021+ const u64 *data = lowmem_page_address(page);
102022+
102023+ for (index = 0; index < end; index++)
102024+ hash ^= hash + data[index];
102025+ latent_entropy ^= hash;
102026+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
102027+ }
102028+#endif
102029+
102030 page_zone(page)->managed_pages += nr_pages;
102031 set_page_refcounted(page);
102032 __free_pages(page, order);
102033@@ -927,8 +965,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
102034 arch_alloc_page(page, order);
102035 kernel_map_pages(page, 1 << order, 1);
102036
102037+#ifndef CONFIG_PAX_MEMORY_SANITIZE
102038 if (gfp_flags & __GFP_ZERO)
102039 prep_zero_page(page, order, gfp_flags);
102040+#endif
102041
102042 if (order && (gfp_flags & __GFP_COMP))
102043 prep_compound_page(page, order);
102044@@ -2427,7 +2467,7 @@ static void reset_alloc_batches(struct zonelist *zonelist,
102045 continue;
102046 mod_zone_page_state(zone, NR_ALLOC_BATCH,
102047 high_wmark_pages(zone) - low_wmark_pages(zone) -
102048- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
102049+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
102050 }
102051 }
102052
102053diff --git a/mm/percpu.c b/mm/percpu.c
102054index 2ddf9a9..f8fc075 100644
102055--- a/mm/percpu.c
102056+++ b/mm/percpu.c
102057@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
102058 static unsigned int pcpu_high_unit_cpu __read_mostly;
102059
102060 /* the address of the first chunk which starts with the kernel static area */
102061-void *pcpu_base_addr __read_mostly;
102062+void *pcpu_base_addr __read_only;
102063 EXPORT_SYMBOL_GPL(pcpu_base_addr);
102064
102065 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
102066diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
102067index 5077afc..846c9ef 100644
102068--- a/mm/process_vm_access.c
102069+++ b/mm/process_vm_access.c
102070@@ -13,6 +13,7 @@
102071 #include <linux/uio.h>
102072 #include <linux/sched.h>
102073 #include <linux/highmem.h>
102074+#include <linux/security.h>
102075 #include <linux/ptrace.h>
102076 #include <linux/slab.h>
102077 #include <linux/syscalls.h>
102078@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
102079 ssize_t iov_len;
102080 size_t total_len = iov_iter_count(iter);
102081
102082+ return -ENOSYS; // PaX: until properly audited
102083+
102084 /*
102085 * Work out how many pages of struct pages we're going to need
102086 * when eventually calling get_user_pages
102087 */
102088 for (i = 0; i < riovcnt; i++) {
102089 iov_len = rvec[i].iov_len;
102090- if (iov_len > 0) {
102091- nr_pages_iov = ((unsigned long)rvec[i].iov_base
102092- + iov_len)
102093- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
102094- / PAGE_SIZE + 1;
102095- nr_pages = max(nr_pages, nr_pages_iov);
102096- }
102097+ if (iov_len <= 0)
102098+ continue;
102099+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
102100+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
102101+ nr_pages = max(nr_pages, nr_pages_iov);
102102 }
102103
102104 if (nr_pages == 0)
102105@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
102106 goto free_proc_pages;
102107 }
102108
102109+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
102110+ rc = -EPERM;
102111+ goto put_task_struct;
102112+ }
102113+
102114 mm = mm_access(task, PTRACE_MODE_ATTACH);
102115 if (!mm || IS_ERR(mm)) {
102116 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
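
The iovec scan above keeps the original page-count arithmetic, only flattened with an early continue (and note the whole routine is short-circuited to -ENOSYS further up until audited). The formula is worth seeing in isolation; a standalone check assuming 4 KiB pages:

    #include <assert.h>

    #define PAGE_SIZE 4096UL

    static unsigned long pages_spanned(unsigned long base, unsigned long len)
    {
        return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
    }

    int main(void)
    {
        assert(pages_spanned(0, 1) == 1);             /* one byte, one page */
        assert(pages_spanned(PAGE_SIZE - 1, 2) == 2); /* straddles a boundary */
        /* may overcount by one for page-aligned ends, which is harmless
         * when the result only sizes a scratch array */
        assert(pages_spanned(0, PAGE_SIZE) == 2);
        return 0;
    }
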
102117diff --git a/mm/rmap.c b/mm/rmap.c
102118index 22a4a76..9551288 100644
102119--- a/mm/rmap.c
102120+++ b/mm/rmap.c
102121@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102122 struct anon_vma *anon_vma = vma->anon_vma;
102123 struct anon_vma_chain *avc;
102124
102125+#ifdef CONFIG_PAX_SEGMEXEC
102126+ struct anon_vma_chain *avc_m = NULL;
102127+#endif
102128+
102129 might_sleep();
102130 if (unlikely(!anon_vma)) {
102131 struct mm_struct *mm = vma->vm_mm;
102132@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102133 if (!avc)
102134 goto out_enomem;
102135
102136+#ifdef CONFIG_PAX_SEGMEXEC
102137+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
102138+ if (!avc_m)
102139+ goto out_enomem_free_avc;
102140+#endif
102141+
102142 anon_vma = find_mergeable_anon_vma(vma);
102143 allocated = NULL;
102144 if (!anon_vma) {
102145@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102146 /* page_table_lock to protect against threads */
102147 spin_lock(&mm->page_table_lock);
102148 if (likely(!vma->anon_vma)) {
102149+
102150+#ifdef CONFIG_PAX_SEGMEXEC
102151+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
102152+
102153+ if (vma_m) {
102154+ BUG_ON(vma_m->anon_vma);
102155+ vma_m->anon_vma = anon_vma;
102156+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
102157+ avc_m = NULL;
102158+ }
102159+#endif
102160+
102161 vma->anon_vma = anon_vma;
102162 anon_vma_chain_link(vma, avc, anon_vma);
102163 allocated = NULL;
102164@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102165
102166 if (unlikely(allocated))
102167 put_anon_vma(allocated);
102168+
102169+#ifdef CONFIG_PAX_SEGMEXEC
102170+ if (unlikely(avc_m))
102171+ anon_vma_chain_free(avc_m);
102172+#endif
102173+
102174 if (unlikely(avc))
102175 anon_vma_chain_free(avc);
102176 }
102177 return 0;
102178
102179 out_enomem_free_avc:
102180+
102181+#ifdef CONFIG_PAX_SEGMEXEC
102182+ if (avc_m)
102183+ anon_vma_chain_free(avc_m);
102184+#endif
102185+
102186 anon_vma_chain_free(avc);
102187 out_enomem:
102188 return -ENOMEM;
102189@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
102190 * Attach the anon_vmas from src to dst.
102191 * Returns 0 on success, -ENOMEM on failure.
102192 */
102193-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
102194+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
102195 {
102196 struct anon_vma_chain *avc, *pavc;
102197 struct anon_vma *root = NULL;
102198@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
102199 * the corresponding VMA in the parent process is attached to.
102200 * Returns 0 on success, non-zero on failure.
102201 */
102202-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
102203+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
102204 {
102205 struct anon_vma_chain *avc;
102206 struct anon_vma *anon_vma;
102207@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data)
102208 void __init anon_vma_init(void)
102209 {
102210 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
102211- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
102212- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
102213+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
102214+ anon_vma_ctor);
102215+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
102216+ SLAB_PANIC|SLAB_NO_SANITIZE);
102217 }
102218
102219 /*
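
The avc_m handling above follows the familiar preallocation pattern: the mirror's anon_vma_chain is allocated with GFP_KERNEL before page_table_lock is taken, consumed under the lock only if a mirror vma exists, and freed afterwards if unused. A hypothetical userspace analogue of the shape (names invented):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *shared_slot;

    static int install_slot(void)
    {
        void *prealloc = malloc(64);   /* may block: done before taking the lock */
        if (!prealloc)
            return -1;

        pthread_mutex_lock(&lock);
        if (!shared_slot) {            /* recheck under the lock */
            shared_slot = prealloc;
            prealloc = NULL;           /* consumed */
        }
        pthread_mutex_unlock(&lock);

        free(prealloc);                /* no-op if it was consumed */
        return 0;
    }

    int main(void)
    {
        return install_slot();
    }
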
102220diff --git a/mm/shmem.c b/mm/shmem.c
102221index af68b15..1227320 100644
102222--- a/mm/shmem.c
102223+++ b/mm/shmem.c
102224@@ -33,7 +33,7 @@
102225 #include <linux/swap.h>
102226 #include <linux/aio.h>
102227
102228-static struct vfsmount *shm_mnt;
102229+struct vfsmount *shm_mnt;
102230
102231 #ifdef CONFIG_SHMEM
102232 /*
102233@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
102234 #define BOGO_DIRENT_SIZE 20
102235
102236 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
102237-#define SHORT_SYMLINK_LEN 128
102238+#define SHORT_SYMLINK_LEN 64
102239
102240 /*
102241 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
102242@@ -2219,6 +2219,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
102243 static int shmem_xattr_validate(const char *name)
102244 {
102245 struct { const char *prefix; size_t len; } arr[] = {
102246+
102247+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
102248+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
102249+#endif
102250+
102251 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
102252 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
102253 };
102254@@ -2274,6 +2279,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
102255 if (err)
102256 return err;
102257
102258+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
102259+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
102260+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
102261+ return -EOPNOTSUPP;
102262+ if (size > 8)
102263+ return -EINVAL;
102264+ }
102265+#endif
102266+
102267 return simple_xattr_set(&info->xattrs, name, value, size, flags);
102268 }
102269
102270@@ -2586,8 +2600,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
102271 int err = -ENOMEM;
102272
102273 /* Round up to L1_CACHE_BYTES to resist false sharing */
102274- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
102275- L1_CACHE_BYTES), GFP_KERNEL);
102276+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
102277 if (!sbinfo)
102278 return -ENOMEM;
102279
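
The shmem hook above lets exactly one "user." xattr through -- XATTR_NAME_PAX_FLAGS, capped at 8 bytes -- so PaX flag markings can survive on tmpfs, where user xattrs are otherwise rejected. A userspace sketch, assuming the xattr name resolves to "user.pax.flags" and with an illustrative flag string:

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(void)
    {
        /* mark a tmpfs file; "em" is a hypothetical flag combination */
        if (setxattr("/tmp/somebinary", "user.pax.flags", "em", 2, 0) != 0)
            perror("setxattr");
        return 0;
    }
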
102280diff --git a/mm/slab.c b/mm/slab.c
102281index 3070b92..bcfff83 100644
102282--- a/mm/slab.c
102283+++ b/mm/slab.c
102284@@ -311,10 +311,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
102285 if ((x)->max_freeable < i) \
102286 (x)->max_freeable = i; \
102287 } while (0)
102288-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
102289-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
102290-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
102291-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
102292+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
102293+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
102294+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
102295+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
102296+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
102297+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
102298 #else
102299 #define STATS_INC_ACTIVE(x) do { } while (0)
102300 #define STATS_DEC_ACTIVE(x) do { } while (0)
102301@@ -331,6 +333,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
102302 #define STATS_INC_ALLOCMISS(x) do { } while (0)
102303 #define STATS_INC_FREEHIT(x) do { } while (0)
102304 #define STATS_INC_FREEMISS(x) do { } while (0)
102305+#define STATS_INC_SANITIZED(x) do { } while (0)
102306+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
102307 #endif
102308
102309 #if DEBUG
102310@@ -447,7 +451,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
102311 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
102312 */
102313 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
102314- const struct page *page, void *obj)
102315+ const struct page *page, const void *obj)
102316 {
102317 u32 offset = (obj - page->s_mem);
102318 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
102319@@ -1558,12 +1562,12 @@ void __init kmem_cache_init(void)
102320 */
102321
102322 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
102323- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
102324+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
102325
102326 if (INDEX_AC != INDEX_NODE)
102327 kmalloc_caches[INDEX_NODE] =
102328 create_kmalloc_cache("kmalloc-node",
102329- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
102330+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
102331
102332 slab_early_init = 0;
102333
102334@@ -3512,6 +3516,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
102335 struct array_cache *ac = cpu_cache_get(cachep);
102336
102337 check_irq_off();
102338+
102339+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102340+ if (pax_sanitize_slab) {
102341+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
102342+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
102343+
102344+ if (cachep->ctor)
102345+ cachep->ctor(objp);
102346+
102347+ STATS_INC_SANITIZED(cachep);
102348+ } else
102349+ STATS_INC_NOT_SANITIZED(cachep);
102350+ }
102351+#endif
102352+
102353 kmemleak_free_recursive(objp, cachep->flags);
102354 objp = cache_free_debugcheck(cachep, objp, caller);
102355
102356@@ -3735,6 +3754,7 @@ void kfree(const void *objp)
102357
102358 if (unlikely(ZERO_OR_NULL_PTR(objp)))
102359 return;
102360+ VM_BUG_ON(!virt_addr_valid(objp));
102361 local_irq_save(flags);
102362 kfree_debugcheck(objp);
102363 c = virt_to_cache(objp);
102364@@ -4176,14 +4196,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
102365 }
102366 /* cpu stats */
102367 {
102368- unsigned long allochit = atomic_read(&cachep->allochit);
102369- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
102370- unsigned long freehit = atomic_read(&cachep->freehit);
102371- unsigned long freemiss = atomic_read(&cachep->freemiss);
102372+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
102373+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
102374+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
102375+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
102376
102377 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
102378 allochit, allocmiss, freehit, freemiss);
102379 }
102380+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102381+ {
102382+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
102383+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
102384+
102385+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
102386+ }
102387+#endif
102388 #endif
102389 }
102390
102391@@ -4404,13 +4432,69 @@ static const struct file_operations proc_slabstats_operations = {
102392 static int __init slab_proc_init(void)
102393 {
102394 #ifdef CONFIG_DEBUG_SLAB_LEAK
102395- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
102396+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
102397 #endif
102398 return 0;
102399 }
102400 module_init(slab_proc_init);
102401 #endif
102402
102403+bool is_usercopy_object(const void *ptr)
102404+{
102405+ struct page *page;
102406+ struct kmem_cache *cachep;
102407+
102408+ if (ZERO_OR_NULL_PTR(ptr))
102409+ return false;
102410+
102411+ if (!slab_is_available())
102412+ return false;
102413+
102414+ if (!virt_addr_valid(ptr))
102415+ return false;
102416+
102417+ page = virt_to_head_page(ptr);
102418+
102419+ if (!PageSlab(page))
102420+ return false;
102421+
102422+ cachep = page->slab_cache;
102423+ return cachep->flags & SLAB_USERCOPY;
102424+}
102425+
102426+#ifdef CONFIG_PAX_USERCOPY
102427+const char *check_heap_object(const void *ptr, unsigned long n)
102428+{
102429+ struct page *page;
102430+ struct kmem_cache *cachep;
102431+ unsigned int objnr;
102432+ unsigned long offset;
102433+
102434+ if (ZERO_OR_NULL_PTR(ptr))
102435+ return "<null>";
102436+
102437+ if (!virt_addr_valid(ptr))
102438+ return NULL;
102439+
102440+ page = virt_to_head_page(ptr);
102441+
102442+ if (!PageSlab(page))
102443+ return NULL;
102444+
102445+ cachep = page->slab_cache;
102446+ if (!(cachep->flags & SLAB_USERCOPY))
102447+ return cachep->name;
102448+
102449+ objnr = obj_to_index(cachep, page, ptr);
102450+ BUG_ON(objnr >= cachep->num);
102451+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
102452+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
102453+ return NULL;
102454+
102455+ return cachep->name;
102456+}
102457+#endif
102458+
102459 /**
102460 * ksize - get the actual amount of memory allocated for a given object
102461 * @objp: Pointer to the object
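
The SLAB_USERCOPY object-bounds test at the heart of check_heap_object() above, in isolation: a copy of n bytes starting offset bytes into an object of size bytes is permitted iff it cannot run past the object, with the comparisons ordered so neither can wrap. Standalone check:

    #include <assert.h>

    static int span_ok(unsigned long offset, unsigned long n, unsigned long size)
    {
        return offset <= size && n <= size - offset;
    }

    int main(void)
    {
        assert(span_ok(0, 64, 64));   /* whole object */
        assert(span_ok(60, 4, 64));   /* tail of the object */
        assert(!span_ok(60, 8, 64));  /* would run 4 bytes past the end */
        assert(!span_ok(80, 1, 64));  /* starts beyond the object */
        return 0;
    }
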
102462diff --git a/mm/slab.h b/mm/slab.h
102463index 961a3fb..6b12514 100644
102464--- a/mm/slab.h
102465+++ b/mm/slab.h
102466@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
102467 /* The slab cache that manages slab cache information */
102468 extern struct kmem_cache *kmem_cache;
102469
102470+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102471+#ifdef CONFIG_X86_64
102472+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
102473+#else
102474+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
102475+#endif
102476+extern bool pax_sanitize_slab;
102477+#endif
102478+
102479 unsigned long calculate_alignment(unsigned long flags,
102480 unsigned long align, unsigned long size);
102481
102482@@ -67,7 +76,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
102483
102484 /* Legal flag mask for kmem_cache_create(), for various configurations */
102485 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
102486- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
102487+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
102488+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
102489
102490 #if defined(CONFIG_DEBUG_SLAB)
102491 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
102492@@ -251,6 +261,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
102493 return s;
102494
102495 page = virt_to_head_page(x);
102496+
102497+ BUG_ON(!PageSlab(page));
102498+
102499 cachep = page->slab_cache;
102500 if (slab_equal_or_root(cachep, s))
102501 return cachep;
102502diff --git a/mm/slab_common.c b/mm/slab_common.c
102503index d31c4ba..1121296 100644
102504--- a/mm/slab_common.c
102505+++ b/mm/slab_common.c
102506@@ -23,11 +23,22 @@
102507
102508 #include "slab.h"
102509
102510-enum slab_state slab_state;
102511+enum slab_state slab_state __read_only;
102512 LIST_HEAD(slab_caches);
102513 DEFINE_MUTEX(slab_mutex);
102514 struct kmem_cache *kmem_cache;
102515
102516+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102517+bool pax_sanitize_slab __read_only = true;
102518+static int __init pax_sanitize_slab_setup(char *str)
102519+{
102520+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
102521+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
102522+ return 1;
102523+}
102524+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
102525+#endif
102526+
102527 #ifdef CONFIG_DEBUG_VM
102528 static int kmem_cache_sanity_check(const char *name, size_t size)
102529 {
102530@@ -158,7 +169,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
102531 if (err)
102532 goto out_free_cache;
102533
102534- s->refcount = 1;
102535+ atomic_set(&s->refcount, 1);
102536 list_add(&s->list, &slab_caches);
102537 out:
102538 if (err)
102539@@ -339,8 +350,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
102540
102541 mutex_lock(&slab_mutex);
102542
102543- s->refcount--;
102544- if (s->refcount)
102545+ if (!atomic_dec_and_test(&s->refcount))
102546 goto out_unlock;
102547
102548 if (memcg_cleanup_cache_params(s) != 0)
102549@@ -360,7 +370,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
102550 rcu_barrier();
102551
102552 memcg_free_cache_params(s);
102553-#ifdef SLAB_SUPPORTS_SYSFS
102554+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
102555 sysfs_slab_remove(s);
102556 #else
102557 slab_kmem_cache_release(s);
102558@@ -416,7 +426,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
102559 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
102560 name, size, err);
102561
102562- s->refcount = -1; /* Exempt from merging for now */
102563+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
102564 }
102565
102566 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
102567@@ -429,7 +439,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
102568
102569 create_boot_cache(s, name, size, flags);
102570 list_add(&s->list, &slab_caches);
102571- s->refcount = 1;
102572+ atomic_set(&s->refcount, 1);
102573 return s;
102574 }
102575
102576@@ -441,6 +451,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
102577 EXPORT_SYMBOL(kmalloc_dma_caches);
102578 #endif
102579
102580+#ifdef CONFIG_PAX_USERCOPY_SLABS
102581+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
102582+EXPORT_SYMBOL(kmalloc_usercopy_caches);
102583+#endif
102584+
102585 /*
102586 * Conversion table for small slab sizes / 8 to the index in the
102587 * kmalloc array. This is necessary for slabs < 192 since we have non power
102588@@ -505,6 +520,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
102589 return kmalloc_dma_caches[index];
102590
102591 #endif
102592+
102593+#ifdef CONFIG_PAX_USERCOPY_SLABS
102594+ if (unlikely((flags & GFP_USERCOPY)))
102595+ return kmalloc_usercopy_caches[index];
102596+
102597+#endif
102598+
102599 return kmalloc_caches[index];
102600 }
102601
102602@@ -561,7 +583,7 @@ void __init create_kmalloc_caches(unsigned long flags)
102603 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
102604 if (!kmalloc_caches[i]) {
102605 kmalloc_caches[i] = create_kmalloc_cache(NULL,
102606- 1 << i, flags);
102607+ 1 << i, SLAB_USERCOPY | flags);
102608 }
102609
102610 /*
102611@@ -570,10 +592,10 @@ void __init create_kmalloc_caches(unsigned long flags)
102612 * earlier power of two caches
102613 */
102614 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
102615- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
102616+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
102617
102618 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
102619- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
102620+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
102621 }
102622
102623 /* Kmalloc array is now usable */
102624@@ -606,6 +628,23 @@ void __init create_kmalloc_caches(unsigned long flags)
102625 }
102626 }
102627 #endif
102628+
102629+#ifdef CONFIG_PAX_USERCOPY_SLABS
102630+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
102631+ struct kmem_cache *s = kmalloc_caches[i];
102632+
102633+ if (s) {
102634+ int size = kmalloc_size(i);
102635+ char *n = kasprintf(GFP_NOWAIT,
102636+ "usercopy-kmalloc-%d", size);
102637+
102638+ BUG_ON(!n);
102639+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
102640+ size, SLAB_USERCOPY | flags);
102641+ }
102642+ }
102643+#endif
102644+
102645 }
102646 #endif /* !CONFIG_SLOB */
102647
102648@@ -664,6 +703,9 @@ void print_slabinfo_header(struct seq_file *m)
102649 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
102650 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
102651 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
102652+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102653+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
102654+#endif
102655 #endif
102656 seq_putc(m, '\n');
102657 }
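
The __setup() handler added above takes any integer, so sanitization can be turned off from the kernel command line with pax_sanitize_slab=0 (it defaults to on in this patch). A standalone model of the parse, with strtol standing in for simple_strtol:

    #include <assert.h>
    #include <stdlib.h>

    static int parse_toggle(const char *str)
    {
        return strtol(str, NULL, 0) != 0;  /* mirrors !!simple_strtol(...) */
    }

    int main(void)
    {
        assert(parse_toggle("0") == 0);
        assert(parse_toggle("1") == 1);
        return 0;
    }
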
102658diff --git a/mm/slob.c b/mm/slob.c
102659index 21980e0..ed9a648 100644
102660--- a/mm/slob.c
102661+++ b/mm/slob.c
102662@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
102663 /*
102664 * Return the size of a slob block.
102665 */
102666-static slobidx_t slob_units(slob_t *s)
102667+static slobidx_t slob_units(const slob_t *s)
102668 {
102669 if (s->units > 0)
102670 return s->units;
102671@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
102672 /*
102673 * Return the next free slob block pointer after this one.
102674 */
102675-static slob_t *slob_next(slob_t *s)
102676+static slob_t *slob_next(const slob_t *s)
102677 {
102678 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
102679 slobidx_t next;
102680@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
102681 /*
102682 * Returns true if s is the last free block in its page.
102683 */
102684-static int slob_last(slob_t *s)
102685+static int slob_last(const slob_t *s)
102686 {
102687 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
102688 }
102689
102690-static void *slob_new_pages(gfp_t gfp, int order, int node)
102691+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
102692 {
102693- void *page;
102694+ struct page *page;
102695
102696 #ifdef CONFIG_NUMA
102697 if (node != NUMA_NO_NODE)
102698@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
102699 if (!page)
102700 return NULL;
102701
102702- return page_address(page);
102703+ __SetPageSlab(page);
102704+ return page;
102705 }
102706
102707-static void slob_free_pages(void *b, int order)
102708+static void slob_free_pages(struct page *sp, int order)
102709 {
102710 if (current->reclaim_state)
102711 current->reclaim_state->reclaimed_slab += 1 << order;
102712- free_pages((unsigned long)b, order);
102713+ __ClearPageSlab(sp);
102714+ page_mapcount_reset(sp);
102715+ sp->private = 0;
102716+ __free_pages(sp, order);
102717 }
102718
102719 /*
102720@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
102721
102722 /* Not enough space: must allocate a new page */
102723 if (!b) {
102724- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
102725- if (!b)
102726+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
102727+ if (!sp)
102728 return NULL;
102729- sp = virt_to_page(b);
102730- __SetPageSlab(sp);
102731+ b = page_address(sp);
102732
102733 spin_lock_irqsave(&slob_lock, flags);
102734 sp->units = SLOB_UNITS(PAGE_SIZE);
102735 sp->freelist = b;
102736+ sp->private = 0;
102737 INIT_LIST_HEAD(&sp->lru);
102738 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
102739 set_slob_page_free(sp, slob_list);
102740@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
102741 if (slob_page_free(sp))
102742 clear_slob_page_free(sp);
102743 spin_unlock_irqrestore(&slob_lock, flags);
102744- __ClearPageSlab(sp);
102745- page_mapcount_reset(sp);
102746- slob_free_pages(b, 0);
102747+ slob_free_pages(sp, 0);
102748 return;
102749 }
102750
102751+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102752+ if (pax_sanitize_slab)
102753+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
102754+#endif
102755+
102756 if (!slob_page_free(sp)) {
102757 /* This slob page is about to become partially free. Easy! */
102758 sp->units = units;
102759@@ -424,11 +431,10 @@ out:
102760 */
102761
102762 static __always_inline void *
102763-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
102764+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
102765 {
102766- unsigned int *m;
102767- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
102768- void *ret;
102769+ slob_t *m;
102770+ void *ret = NULL;
102771
102772 gfp &= gfp_allowed_mask;
102773
102774@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
102775
102776 if (!m)
102777 return NULL;
102778- *m = size;
102779+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
102780+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
102781+ m[0].units = size;
102782+ m[1].units = align;
102783 ret = (void *)m + align;
102784
102785 trace_kmalloc_node(caller, ret,
102786 size, size + align, gfp, node);
102787 } else {
102788 unsigned int order = get_order(size);
102789+ struct page *page;
102790
102791 if (likely(order))
102792 gfp |= __GFP_COMP;
102793- ret = slob_new_pages(gfp, order, node);
102794+ page = slob_new_pages(gfp, order, node);
102795+ if (page) {
102796+ ret = page_address(page);
102797+ page->private = size;
102798+ }
102799
102800 trace_kmalloc_node(caller, ret,
102801 size, PAGE_SIZE << order, gfp, node);
102802 }
102803
102804- kmemleak_alloc(ret, size, 1, gfp);
102805+ return ret;
102806+}
102807+
102808+static __always_inline void *
102809+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
102810+{
102811+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
102812+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
102813+
102814+ if (!ZERO_OR_NULL_PTR(ret))
102815+ kmemleak_alloc(ret, size, 1, gfp);
102816 return ret;
102817 }
102818
102819@@ -493,34 +517,112 @@ void kfree(const void *block)
102820 return;
102821 kmemleak_free(block);
102822
102823+ VM_BUG_ON(!virt_addr_valid(block));
102824 sp = virt_to_page(block);
102825- if (PageSlab(sp)) {
102826+ VM_BUG_ON(!PageSlab(sp));
102827+ if (!sp->private) {
102828 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
102829- unsigned int *m = (unsigned int *)(block - align);
102830- slob_free(m, *m + align);
102831- } else
102832+ slob_t *m = (slob_t *)(block - align);
102833+ slob_free(m, m[0].units + align);
102834+ } else {
102835+ __ClearPageSlab(sp);
102836+ page_mapcount_reset(sp);
102837+ sp->private = 0;
102838 __free_pages(sp, compound_order(sp));
102839+ }
102840 }
102841 EXPORT_SYMBOL(kfree);
102842
102843+bool is_usercopy_object(const void *ptr)
102844+{
102845+ if (!slab_is_available())
102846+ return false;
102847+
102848+ // PAX: TODO
102849+
102850+ return false;
102851+}
102852+
102853+#ifdef CONFIG_PAX_USERCOPY
102854+const char *check_heap_object(const void *ptr, unsigned long n)
102855+{
102856+ struct page *page;
102857+ const slob_t *free;
102858+ const void *base;
102859+ unsigned long flags;
102860+
102861+ if (ZERO_OR_NULL_PTR(ptr))
102862+ return "<null>";
102863+
102864+ if (!virt_addr_valid(ptr))
102865+ return NULL;
102866+
102867+ page = virt_to_head_page(ptr);
102868+ if (!PageSlab(page))
102869+ return NULL;
102870+
102871+ if (page->private) {
102872+ base = page;
102873+ if (base <= ptr && n <= page->private - (ptr - base))
102874+ return NULL;
102875+ return "<slob>";
102876+ }
102877+
102878+ /* tricky: walk the free list and the allocated chunks in tandem to find the chunk containing ptr */
102879+ spin_lock_irqsave(&slob_lock, flags);
102880+ base = (void *)((unsigned long)ptr & PAGE_MASK);
102881+ free = page->freelist;
102882+
102883+ while (!slob_last(free) && (void *)free <= ptr) {
102884+ base = free + slob_units(free);
102885+ free = slob_next(free);
102886+ }
102887+
102888+ while (base < (void *)free) {
102889+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
102890+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
102891+ int offset;
102892+
102893+ if (ptr < base + align)
102894+ break;
102895+
102896+ offset = ptr - base - align;
102897+ if (offset >= m) {
102898+ base += size;
102899+ continue;
102900+ }
102901+
102902+ if (n > m - offset)
102903+ break;
102904+
102905+ spin_unlock_irqrestore(&slob_lock, flags);
102906+ return NULL;
102907+ }
102908+
102909+ spin_unlock_irqrestore(&slob_lock, flags);
102910+ return "<slob>";
102911+}
102912+#endif
102913+
102914 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
102915 size_t ksize(const void *block)
102916 {
102917 struct page *sp;
102918 int align;
102919- unsigned int *m;
102920+ slob_t *m;
102921
102922 BUG_ON(!block);
102923 if (unlikely(block == ZERO_SIZE_PTR))
102924 return 0;
102925
102926 sp = virt_to_page(block);
102927- if (unlikely(!PageSlab(sp)))
102928- return PAGE_SIZE << compound_order(sp);
102929+ VM_BUG_ON(!PageSlab(sp));
102930+ if (sp->private)
102931+ return sp->private;
102932
102933 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
102934- m = (unsigned int *)(block - align);
102935- return SLOB_UNITS(*m) * SLOB_UNIT;
102936+ m = (slob_t *)(block - align);
102937+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
102938 }
102939 EXPORT_SYMBOL(ksize);
102940
102941@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
102942
102943 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
102944 {
102945- void *b;
102946+ void *b = NULL;
102947
102948 flags &= gfp_allowed_mask;
102949
102950 lockdep_trace_alloc(flags);
102951
102952+#ifdef CONFIG_PAX_USERCOPY_SLABS
102953+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
102954+#else
102955 if (c->size < PAGE_SIZE) {
102956 b = slob_alloc(c->size, flags, c->align, node);
102957 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
102958 SLOB_UNITS(c->size) * SLOB_UNIT,
102959 flags, node);
102960 } else {
102961- b = slob_new_pages(flags, get_order(c->size), node);
102962+ struct page *sp;
102963+
102964+ sp = slob_new_pages(flags, get_order(c->size), node);
102965+ if (sp) {
102966+ b = page_address(sp);
102967+ sp->private = c->size;
102968+ }
102969 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
102970 PAGE_SIZE << get_order(c->size),
102971 flags, node);
102972 }
102973+#endif
102974
102975 if (b && c->ctor)
102976 c->ctor(b);
102977@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
102978
102979 static void __kmem_cache_free(void *b, int size)
102980 {
102981- if (size < PAGE_SIZE)
102982+ struct page *sp;
102983+
102984+ sp = virt_to_page(b);
102985+ BUG_ON(!PageSlab(sp));
102986+ if (!sp->private)
102987 slob_free(b, size);
102988 else
102989- slob_free_pages(b, get_order(size));
102990+ slob_free_pages(sp, get_order(size));
102991 }
102992
102993 static void kmem_rcu_free(struct rcu_head *head)
102994@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
102995
102996 void kmem_cache_free(struct kmem_cache *c, void *b)
102997 {
102998+ int size = c->size;
102999+
103000+#ifdef CONFIG_PAX_USERCOPY_SLABS
103001+ if (size + c->align < PAGE_SIZE) {
103002+ size += c->align;
103003+ b -= c->align;
103004+ }
103005+#endif
103006+
103007 kmemleak_free_recursive(b, c->flags);
103008 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
103009 struct slob_rcu *slob_rcu;
103010- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
103011- slob_rcu->size = c->size;
103012+ slob_rcu = b + (size - sizeof(struct slob_rcu));
103013+ slob_rcu->size = size;
103014 call_rcu(&slob_rcu->head, kmem_rcu_free);
103015 } else {
103016- __kmem_cache_free(b, c->size);
103017+ __kmem_cache_free(b, size);
103018 }
103019
103020+#ifdef CONFIG_PAX_USERCOPY_SLABS
103021+ trace_kfree(_RET_IP_, b);
103022+#else
103023 trace_kmem_cache_free(_RET_IP_, b);
103024+#endif
103025+
103026 }
103027 EXPORT_SYMBOL(kmem_cache_free);
103028
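
The SLOB changes above grow the kmalloc header from a single unsigned int to two slob_t units -- m[0] holds the request size and m[1] the alignment -- which is what lets kfree(), ksize(), and check_heap_object() recover both; the BUILD_BUG_ONs guarantee the minimum alignment leaves room for them. A simplified userspace sketch of the layout, with slob_t reduced to one long:

    #include <assert.h>
    #include <stdlib.h>

    typedef struct { long units; } slob_t;  /* simplified stand-in */

    static void *hdr_alloc(size_t size, size_t align)
    {
        char *m = malloc(align + size);        /* header lives below the pointer */
        if (!m)
            return NULL;
        ((slob_t *)m)[0].units = (long)size;   /* as in the patch: m[0] = size  */
        ((slob_t *)m)[1].units = (long)align;  /*                 m[1] = align */
        return m + align;
    }

    static size_t hdr_size(const void *block, size_t align)
    {
        const slob_t *m = (const slob_t *)((const char *)block - align);
        return (size_t)m[0].units;
    }

    int main(void)
    {
        size_t align = 2 * sizeof(slob_t);  /* must leave room for both units */
        char *p = hdr_alloc(100, align);

        assert(p && hdr_size(p, align) == 100);
        free(p - align);
        return 0;
    }
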
103029diff --git a/mm/slub.c b/mm/slub.c
103030index 7300480..cb92846 100644
103031--- a/mm/slub.c
103032+++ b/mm/slub.c
103033@@ -207,7 +207,7 @@ struct track {
103034
103035 enum track_item { TRACK_ALLOC, TRACK_FREE };
103036
103037-#ifdef CONFIG_SYSFS
103038+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103039 static int sysfs_slab_add(struct kmem_cache *);
103040 static int sysfs_slab_alias(struct kmem_cache *, const char *);
103041 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
103042@@ -546,7 +546,7 @@ static void print_track(const char *s, struct track *t)
103043 if (!t->addr)
103044 return;
103045
103046- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
103047+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
103048 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
103049 #ifdef CONFIG_STACKTRACE
103050 {
103051@@ -2673,6 +2673,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
103052
103053 slab_free_hook(s, x);
103054
103055+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103056+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
103057+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
103058+ if (s->ctor)
103059+ s->ctor(x);
103060+ }
103061+#endif
103062+
103063 redo:
103064 /*
103065 	 * Determine the current cpu's per-cpu slab.
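/*
 * Sketch of the sanitize-on-free step added above: freed objects are wiped
 * with a fixed byte so stale data cannot leak through reallocation, then
 * the constructor (if any) restores the state the allocator expects on the
 * freelist.  Userspace mock; SANITIZE_VALUE and struct mock_cache are
 * stand-ins for the kernel's PAX_MEMORY_SANITIZE_VALUE and kmem_cache.
 */
#include <string.h>
#include <stddef.h>

#define SANITIZE_VALUE 0xfe

struct mock_cache {
	size_t object_size;
	void (*ctor)(void *obj);
};

static void sanitize_on_free(struct mock_cache *c, void *obj)
{
	memset(obj, SANITIZE_VALUE, c->object_size);	/* wipe stale data */
	if (c->ctor)
		c->ctor(obj);	/* re-establish constructed state */
}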
103066@@ -2740,7 +2748,7 @@ static int slub_min_objects;
103067 * Merge control. If this is set then no merging of slab caches will occur.
103068 * (Could be removed. This was introduced to pacify the merge skeptics.)
103069 */
103070-static int slub_nomerge;
103071+static int slub_nomerge = 1;
103072
103073 /*
103074 * Calculate the order of allocation given an slab object size.
103075@@ -3019,6 +3027,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
103076 s->inuse = size;
103077
103078 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
103079+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103080+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
103081+#endif
103082 s->ctor)) {
103083 /*
103084 * Relocate free pointer after the object if it is not
103085@@ -3347,6 +3358,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
103086 EXPORT_SYMBOL(__kmalloc_node);
103087 #endif
103088
103089+bool is_usercopy_object(const void *ptr)
103090+{
103091+ struct page *page;
103092+ struct kmem_cache *s;
103093+
103094+ if (ZERO_OR_NULL_PTR(ptr))
103095+ return false;
103096+
103097+ if (!slab_is_available())
103098+ return false;
103099+
103100+ if (!virt_addr_valid(ptr))
103101+ return false;
103102+
103103+ page = virt_to_head_page(ptr);
103104+
103105+ if (!PageSlab(page))
103106+ return false;
103107+
103108+ s = page->slab_cache;
103109+ return s->flags & SLAB_USERCOPY;
103110+}
103111+
103112+#ifdef CONFIG_PAX_USERCOPY
103113+const char *check_heap_object(const void *ptr, unsigned long n)
103114+{
103115+ struct page *page;
103116+ struct kmem_cache *s;
103117+ unsigned long offset;
103118+
103119+ if (ZERO_OR_NULL_PTR(ptr))
103120+ return "<null>";
103121+
103122+ if (!virt_addr_valid(ptr))
103123+ return NULL;
103124+
103125+ page = virt_to_head_page(ptr);
103126+
103127+ if (!PageSlab(page))
103128+ return NULL;
103129+
103130+ s = page->slab_cache;
103131+ if (!(s->flags & SLAB_USERCOPY))
103132+ return s->name;
103133+
103134+ offset = (ptr - page_address(page)) % s->size;
103135+ if (offset <= s->object_size && n <= s->object_size - offset)
103136+ return NULL;
103137+
103138+ return s->name;
103139+}
103140+#endif
103141+
103142 size_t ksize(const void *object)
103143 {
103144 struct page *page;
103145@@ -3375,6 +3439,7 @@ void kfree(const void *x)
103146 if (unlikely(ZERO_OR_NULL_PTR(x)))
103147 return;
103148
103149+ VM_BUG_ON(!virt_addr_valid(x));
103150 page = virt_to_head_page(x);
103151 if (unlikely(!PageSlab(page))) {
103152 BUG_ON(!PageCompound(page));
103153@@ -3680,7 +3745,7 @@ static int slab_unmergeable(struct kmem_cache *s)
103154 /*
103155 * We may have set a slab to be unmergeable during bootstrap.
103156 */
103157- if (s->refcount < 0)
103158+ if (atomic_read(&s->refcount) < 0)
103159 return 1;
103160
103161 return 0;
103162@@ -3737,7 +3802,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
103163 int i;
103164 struct kmem_cache *c;
103165
103166- s->refcount++;
103167+ atomic_inc(&s->refcount);
103168
103169 /*
103170 * Adjust the object sizes so that we clear
103171@@ -3756,7 +3821,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
103172 }
103173
103174 if (sysfs_slab_alias(s, name)) {
103175- s->refcount--;
103176+ atomic_dec(&s->refcount);
103177 s = NULL;
103178 }
103179 }
103180@@ -3873,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
103181 }
103182 #endif
103183
103184-#ifdef CONFIG_SYSFS
103185+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103186 static int count_inuse(struct page *page)
103187 {
103188 return page->inuse;
103189@@ -4156,7 +4221,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
103190 len += sprintf(buf + len, "%7ld ", l->count);
103191
103192 if (l->addr)
103193+#ifdef CONFIG_GRKERNSEC_HIDESYM
103194+ len += sprintf(buf + len, "%pS", NULL);
103195+#else
103196 len += sprintf(buf + len, "%pS", (void *)l->addr);
103197+#endif
103198 else
103199 len += sprintf(buf + len, "<not-available>");
103200
103201@@ -4258,12 +4327,12 @@ static void resiliency_test(void)
103202 validate_slab_cache(kmalloc_caches[9]);
103203 }
103204 #else
103205-#ifdef CONFIG_SYSFS
103206+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103207 static void resiliency_test(void) {};
103208 #endif
103209 #endif
103210
103211-#ifdef CONFIG_SYSFS
103212+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103213 enum slab_stat_type {
103214 SL_ALL, /* All slabs */
103215 SL_PARTIAL, /* Only partially allocated slabs */
103216@@ -4503,13 +4572,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
103217 {
103218 if (!s->ctor)
103219 return 0;
103220+#ifdef CONFIG_GRKERNSEC_HIDESYM
103221+ return sprintf(buf, "%pS\n", NULL);
103222+#else
103223 return sprintf(buf, "%pS\n", s->ctor);
103224+#endif
103225 }
103226 SLAB_ATTR_RO(ctor);
103227
103228 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
103229 {
103230- return sprintf(buf, "%d\n", s->refcount - 1);
103231+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
103232 }
103233 SLAB_ATTR_RO(aliases);
103234
103235@@ -4597,6 +4670,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
103236 SLAB_ATTR_RO(cache_dma);
103237 #endif
103238
103239+#ifdef CONFIG_PAX_USERCOPY_SLABS
103240+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
103241+{
103242+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
103243+}
103244+SLAB_ATTR_RO(usercopy);
103245+#endif
103246+
103247 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
103248 {
103249 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
103250@@ -4931,6 +5012,9 @@ static struct attribute *slab_attrs[] = {
103251 #ifdef CONFIG_ZONE_DMA
103252 &cache_dma_attr.attr,
103253 #endif
103254+#ifdef CONFIG_PAX_USERCOPY_SLABS
103255+ &usercopy_attr.attr,
103256+#endif
103257 #ifdef CONFIG_NUMA
103258 &remote_node_defrag_ratio_attr.attr,
103259 #endif
103260@@ -5181,6 +5265,7 @@ static char *create_unique_id(struct kmem_cache *s)
103261 return name;
103262 }
103263
103264+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103265 static int sysfs_slab_add(struct kmem_cache *s)
103266 {
103267 int err;
103268@@ -5254,6 +5339,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
103269 kobject_del(&s->kobj);
103270 kobject_put(&s->kobj);
103271 }
103272+#endif
103273
103274 /*
103275 * Need to buffer aliases during bootup until sysfs becomes
103276@@ -5267,6 +5353,7 @@ struct saved_alias {
103277
103278 static struct saved_alias *alias_list;
103279
103280+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103281 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
103282 {
103283 struct saved_alias *al;
103284@@ -5289,6 +5376,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
103285 alias_list = al;
103286 return 0;
103287 }
103288+#endif
103289
103290 static int __init slab_sysfs_init(void)
103291 {
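/*
 * The check_heap_object() logic added in this file boils down to: find the
 * object's offset inside its slab and refuse any usercopy that starts past
 * the object or runs beyond its end.  Self-contained sketch of that
 * arithmetic (mock parameters; the real code derives them from struct
 * kmem_cache and the slab page):
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool copy_within_object(uintptr_t slab_base, size_t obj_stride,
			       size_t obj_size, uintptr_t ptr, size_t n)
{
	size_t offset = (ptr - slab_base) % obj_stride;

	/* same test as above: offset and length must both fit the object */
	return offset <= obj_size && n <= obj_size - offset;
}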
103292diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
103293index 4cba9c2..b4f9fcc 100644
103294--- a/mm/sparse-vmemmap.c
103295+++ b/mm/sparse-vmemmap.c
103296@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
103297 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
103298 if (!p)
103299 return NULL;
103300- pud_populate(&init_mm, pud, p);
103301+ pud_populate_kernel(&init_mm, pud, p);
103302 }
103303 return pud;
103304 }
103305@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
103306 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
103307 if (!p)
103308 return NULL;
103309- pgd_populate(&init_mm, pgd, p);
103310+ pgd_populate_kernel(&init_mm, pgd, p);
103311 }
103312 return pgd;
103313 }
103314diff --git a/mm/sparse.c b/mm/sparse.c
103315index d1b48b6..6e8590e 100644
103316--- a/mm/sparse.c
103317+++ b/mm/sparse.c
103318@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
103319
103320 for (i = 0; i < PAGES_PER_SECTION; i++) {
103321 if (PageHWPoison(&memmap[i])) {
103322- atomic_long_sub(1, &num_poisoned_pages);
103323+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
103324 ClearPageHWPoison(&memmap[i]);
103325 }
103326 }
103327diff --git a/mm/swap.c b/mm/swap.c
103328index 9e8e347..3c22e0f 100644
103329--- a/mm/swap.c
103330+++ b/mm/swap.c
103331@@ -31,6 +31,7 @@
103332 #include <linux/memcontrol.h>
103333 #include <linux/gfp.h>
103334 #include <linux/uio.h>
103335+#include <linux/hugetlb.h>
103336
103337 #include "internal.h"
103338
103339@@ -76,6 +77,8 @@ static void __put_compound_page(struct page *page)
103340
103341 __page_cache_release(page);
103342 dtor = get_compound_page_dtor(page);
103343+ if (!PageHuge(page))
103344+ BUG_ON(dtor != free_compound_page);
103345 (*dtor)(page);
103346 }
103347
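/*
 * The swap.c hunk pins the compound-page destructor: unless the page is a
 * hugepage, the only legitimate dtor is free_compound_page(), so anything
 * else indicates a corrupted function pointer.  Generic userspace sketch
 * of validating a callback before the indirect call (mock types; the
 * kernel uses BUG_ON() rather than assert()):
 */
#include <assert.h>

struct mock_page {
	int is_huge;
	void (*dtor)(struct mock_page *page);
};

static void expected_dtor(struct mock_page *page) { (void)page; }

static void put_page_checked(struct mock_page *page)
{
	void (*dtor)(struct mock_page *) = page->dtor;

	if (!page->is_huge)
		assert(dtor == expected_dtor);	/* reject tampered pointers */
	dtor(page);
}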
103348diff --git a/mm/swapfile.c b/mm/swapfile.c
103349index 4c524f7..f7601f17 100644
103350--- a/mm/swapfile.c
103351+++ b/mm/swapfile.c
103352@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
103353
103354 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
103355 /* Activity counter to indicate that a swapon or swapoff has occurred */
103356-static atomic_t proc_poll_event = ATOMIC_INIT(0);
103357+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
103358
103359 static inline unsigned char swap_count(unsigned char ent)
103360 {
103361@@ -1945,7 +1945,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
103362 spin_unlock(&swap_lock);
103363
103364 err = 0;
103365- atomic_inc(&proc_poll_event);
103366+ atomic_inc_unchecked(&proc_poll_event);
103367 wake_up_interruptible(&proc_poll_wait);
103368
103369 out_dput:
103370@@ -1962,8 +1962,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
103371
103372 poll_wait(file, &proc_poll_wait, wait);
103373
103374- if (seq->poll_event != atomic_read(&proc_poll_event)) {
103375- seq->poll_event = atomic_read(&proc_poll_event);
103376+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
103377+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
103378 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
103379 }
103380
103381@@ -2061,7 +2061,7 @@ static int swaps_open(struct inode *inode, struct file *file)
103382 return ret;
103383
103384 seq = file->private_data;
103385- seq->poll_event = atomic_read(&proc_poll_event);
103386+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
103387 return 0;
103388 }
103389
103390@@ -2521,7 +2521,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
103391 (frontswap_map) ? "FS" : "");
103392
103393 mutex_unlock(&swapon_mutex);
103394- atomic_inc(&proc_poll_event);
103395+ atomic_inc_unchecked(&proc_poll_event);
103396 wake_up_interruptible(&proc_poll_wait);
103397
103398 if (S_ISREG(inode->i_mode))
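/*
 * proc_poll_event above is a pure event counter: wrap-around is harmless,
 * so it is switched to the *_unchecked atomic variants that the PaX
 * REFCOUNT overflow trap leaves alone.  Userspace analogue of such a
 * counter using C11 atomics (unsigned, so overflow is well-defined wrap):
 */
#include <stdatomic.h>

static atomic_uint poll_event;

static void note_swap_event(void)
{
	atomic_fetch_add_explicit(&poll_event, 1, memory_order_relaxed);
}

static unsigned int read_poll_event(void)
{
	return atomic_load_explicit(&poll_event, memory_order_relaxed);
}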
103399diff --git a/mm/util.c b/mm/util.c
103400index d5ea733..e8953f9 100644
103401--- a/mm/util.c
103402+++ b/mm/util.c
103403@@ -299,6 +299,12 @@ done:
103404 void arch_pick_mmap_layout(struct mm_struct *mm)
103405 {
103406 mm->mmap_base = TASK_UNMAPPED_BASE;
103407+
103408+#ifdef CONFIG_PAX_RANDMMAP
103409+ if (mm->pax_flags & MF_PAX_RANDMMAP)
103410+ mm->mmap_base += mm->delta_mmap;
103411+#endif
103412+
103413 mm->get_unmapped_area = arch_get_unmapped_area;
103414 }
103415 #endif
103416@@ -475,6 +481,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
103417 if (!mm->arg_end)
103418 goto out_mm; /* Shh! No looking before we're done */
103419
103420+ if (gr_acl_handle_procpidmem(task))
103421+ goto out_mm;
103422+
103423 len = mm->arg_end - mm->arg_start;
103424
103425 if (len > buflen)
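/*
 * Sketch of the RANDMMAP adjustment in arch_pick_mmap_layout() above: a
 * per-process, page-aligned random delta is added to the fixed
 * TASK_UNMAPPED_BASE once at exec time.  The constants and entropy source
 * here are illustrative only, not the kernel's values.
 */
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT		12
#define TASK_UNMAPPED_BASE	0x40000000UL
#define MMAP_RAND_BITS		16	/* illustrative amount of entropy */

static uintptr_t pick_mmap_base(void)
{
	uintptr_t delta = ((uintptr_t)rand() & ((1UL << MMAP_RAND_BITS) - 1))
			  << PAGE_SHIFT;	/* keep the base page-aligned */

	return TASK_UNMAPPED_BASE + delta;
}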
103426diff --git a/mm/vmalloc.c b/mm/vmalloc.c
103427index f64632b..e8c52e7 100644
103428--- a/mm/vmalloc.c
103429+++ b/mm/vmalloc.c
103430@@ -40,6 +40,21 @@ struct vfree_deferred {
103431 };
103432 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
103433
103434+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103435+struct stack_deferred_llist {
103436+ struct llist_head list;
103437+ void *stack;
103438+ void *lowmem_stack;
103439+};
103440+
103441+struct stack_deferred {
103442+ struct stack_deferred_llist list;
103443+ struct work_struct wq;
103444+};
103445+
103446+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
103447+#endif
103448+
103449 static void __vunmap(const void *, int);
103450
103451 static void free_work(struct work_struct *w)
103452@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
103453 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
103454 struct llist_node *llnode = llist_del_all(&p->list);
103455 while (llnode) {
103456- void *p = llnode;
103457+ void *x = llnode;
103458 llnode = llist_next(llnode);
103459- __vunmap(p, 1);
103460+ __vunmap(x, 1);
103461 }
103462 }
103463
103464+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103465+static void unmap_work(struct work_struct *w)
103466+{
103467+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
103468+ struct llist_node *llnode = llist_del_all(&p->list.list);
103469+ while (llnode) {
103470+ struct stack_deferred_llist *x =
103471+ llist_entry((struct llist_head *)llnode,
103472+ struct stack_deferred_llist, list);
103473+ void *stack = ACCESS_ONCE(x->stack);
103474+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
103475+ llnode = llist_next(llnode);
103476+ __vunmap(stack, 0);
103477+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
103478+ }
103479+}
103480+#endif
103481+
103482 /*** Page table manipulation functions ***/
103483
103484 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
103485@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
103486
103487 pte = pte_offset_kernel(pmd, addr);
103488 do {
103489- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
103490- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
103491+
103492+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
103493+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
103494+ BUG_ON(!pte_exec(*pte));
103495+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
103496+ continue;
103497+ }
103498+#endif
103499+
103500+ {
103501+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
103502+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
103503+ }
103504 } while (pte++, addr += PAGE_SIZE, addr != end);
103505 }
103506
103507@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
103508 pte = pte_alloc_kernel(pmd, addr);
103509 if (!pte)
103510 return -ENOMEM;
103511+
103512+ pax_open_kernel();
103513 do {
103514 struct page *page = pages[*nr];
103515
103516- if (WARN_ON(!pte_none(*pte)))
103517+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
103518+ if (pgprot_val(prot) & _PAGE_NX)
103519+#endif
103520+
103521+ if (!pte_none(*pte)) {
103522+ pax_close_kernel();
103523+ WARN_ON(1);
103524 return -EBUSY;
103525- if (WARN_ON(!page))
103526+ }
103527+ if (!page) {
103528+ pax_close_kernel();
103529+ WARN_ON(1);
103530 return -ENOMEM;
103531+ }
103532 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
103533 (*nr)++;
103534 } while (pte++, addr += PAGE_SIZE, addr != end);
103535+ pax_close_kernel();
103536 return 0;
103537 }
103538
103539@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
103540 pmd_t *pmd;
103541 unsigned long next;
103542
103543- pmd = pmd_alloc(&init_mm, pud, addr);
103544+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
103545 if (!pmd)
103546 return -ENOMEM;
103547 do {
103548@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
103549 pud_t *pud;
103550 unsigned long next;
103551
103552- pud = pud_alloc(&init_mm, pgd, addr);
103553+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
103554 if (!pud)
103555 return -ENOMEM;
103556 do {
103557@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
103558 if (addr >= MODULES_VADDR && addr < MODULES_END)
103559 return 1;
103560 #endif
103561+
103562+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
103563+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
103564+ return 1;
103565+#endif
103566+
103567 return is_vmalloc_addr(x);
103568 }
103569
103570@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
103571
103572 if (!pgd_none(*pgd)) {
103573 pud_t *pud = pud_offset(pgd, addr);
103574+#ifdef CONFIG_X86
103575+ if (!pud_large(*pud))
103576+#endif
103577 if (!pud_none(*pud)) {
103578 pmd_t *pmd = pmd_offset(pud, addr);
103579+#ifdef CONFIG_X86
103580+ if (!pmd_large(*pmd))
103581+#endif
103582 if (!pmd_none(*pmd)) {
103583 pte_t *ptep, pte;
103584
103585@@ -1183,13 +1252,23 @@ void __init vmalloc_init(void)
103586 for_each_possible_cpu(i) {
103587 struct vmap_block_queue *vbq;
103588 struct vfree_deferred *p;
103589+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103590+ struct stack_deferred *p2;
103591+#endif
103592
103593 vbq = &per_cpu(vmap_block_queue, i);
103594 spin_lock_init(&vbq->lock);
103595 INIT_LIST_HEAD(&vbq->free);
103596+
103597 p = &per_cpu(vfree_deferred, i);
103598 init_llist_head(&p->list);
103599 INIT_WORK(&p->wq, free_work);
103600+
103601+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103602+ p2 = &per_cpu(stack_deferred, i);
103603+ init_llist_head(&p2->list.list);
103604+ INIT_WORK(&p2->wq, unmap_work);
103605+#endif
103606 }
103607
103608 /* Import existing vmlist entries. */
103609@@ -1318,6 +1397,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
103610 struct vm_struct *area;
103611
103612 BUG_ON(in_interrupt());
103613+
103614+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
103615+ if (flags & VM_KERNEXEC) {
103616+ if (start != VMALLOC_START || end != VMALLOC_END)
103617+ return NULL;
103618+ start = (unsigned long)MODULES_EXEC_VADDR;
103619+ end = (unsigned long)MODULES_EXEC_END;
103620+ }
103621+#endif
103622+
103623 if (flags & VM_IOREMAP)
103624 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
103625
103626@@ -1523,6 +1612,23 @@ void vunmap(const void *addr)
103627 }
103628 EXPORT_SYMBOL(vunmap);
103629
103630+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103631+void unmap_process_stacks(struct task_struct *task)
103632+{
103633+ if (unlikely(in_interrupt())) {
103634+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
103635+ struct stack_deferred_llist *list = task->stack;
103636+ list->stack = task->stack;
103637+ list->lowmem_stack = task->lowmem_stack;
103638+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
103639+ schedule_work(&p->wq);
103640+ } else {
103641+ __vunmap(task->stack, 0);
103642+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
103643+ }
103644+}
103645+#endif
103646+
103647 /**
103648 * vmap - map an array of pages into virtually contiguous space
103649 * @pages: array of page pointers
103650@@ -1543,6 +1649,11 @@ void *vmap(struct page **pages, unsigned int count,
103651 if (count > totalram_pages)
103652 return NULL;
103653
103654+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
103655+ if (!(pgprot_val(prot) & _PAGE_NX))
103656+ flags |= VM_KERNEXEC;
103657+#endif
103658+
103659 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
103660 __builtin_return_address(0));
103661 if (!area)
103662@@ -1643,6 +1754,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
103663 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
103664 goto fail;
103665
103666+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
103667+ if (!(pgprot_val(prot) & _PAGE_NX))
103668+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
103669+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
103670+ else
103671+#endif
103672+
103673 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
103674 start, end, node, gfp_mask, caller);
103675 if (!area)
103676@@ -1819,10 +1937,9 @@ EXPORT_SYMBOL(vzalloc_node);
103677 * For tight control over the page-level allocator and protection flags
103678 * use __vmalloc() instead.
103679 */
103680-
103681 void *vmalloc_exec(unsigned long size)
103682 {
103683- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
103684+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
103685 NUMA_NO_NODE, __builtin_return_address(0));
103686 }
103687
103688@@ -2129,6 +2246,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
103689 {
103690 struct vm_struct *area;
103691
103692+ BUG_ON(vma->vm_mirror);
103693+
103694 size = PAGE_ALIGN(size);
103695
103696 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
103697@@ -2611,7 +2730,11 @@ static int s_show(struct seq_file *m, void *p)
103698 v->addr, v->addr + v->size, v->size);
103699
103700 if (v->caller)
103701+#ifdef CONFIG_GRKERNSEC_HIDESYM
103702+ seq_printf(m, " %pK", v->caller);
103703+#else
103704 seq_printf(m, " %pS", v->caller);
103705+#endif
103706
103707 if (v->nr_pages)
103708 seq_printf(m, " pages=%d", v->nr_pages);
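/*
 * Shape of the deferred stack-unmap machinery added to vmalloc.c above:
 * contexts that must not sleep push nodes onto a lock-free singly-linked
 * list and kick a worker; the worker detaches the whole list at once and
 * frees each entry.  Minimal C11 sketch of the push/drain halves
 * (analogues of llist_add()/llist_del_all(); payload handling is mocked):
 */
#include <stdatomic.h>
#include <stdlib.h>

struct node {
	struct node *next;
	void *payload;
};

static _Atomic(struct node *) deferred_head;

static void deferred_push(struct node *n)	/* safe in atomic context */
{
	n->next = atomic_load(&deferred_head);
	while (!atomic_compare_exchange_weak(&deferred_head, &n->next, n))
		;	/* retry with the refreshed head in n->next */
}

static void deferred_drain(void)		/* runs in the worker */
{
	struct node *n = atomic_exchange(&deferred_head, NULL);

	while (n) {
		struct node *next = n->next;

		free(n->payload);
		free(n);
		n = next;
	}
}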
103709diff --git a/mm/vmstat.c b/mm/vmstat.c
103710index b37bd49..4d7b3da 100644
103711--- a/mm/vmstat.c
103712+++ b/mm/vmstat.c
103713@@ -20,6 +20,7 @@
103714 #include <linux/writeback.h>
103715 #include <linux/compaction.h>
103716 #include <linux/mm_inline.h>
103717+#include <linux/grsecurity.h>
103718
103719 #include "internal.h"
103720
103721@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
103722 *
103723 * vm_stat contains the global counters
103724 */
103725-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
103726+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
103727 EXPORT_SYMBOL(vm_stat);
103728
103729 #ifdef CONFIG_SMP
103730@@ -425,7 +426,7 @@ static inline void fold_diff(int *diff)
103731
103732 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
103733 if (diff[i])
103734- atomic_long_add(diff[i], &vm_stat[i]);
103735+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
103736 }
103737
103738 /*
103739@@ -457,7 +458,7 @@ static void refresh_cpu_vm_stats(void)
103740 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
103741 if (v) {
103742
103743- atomic_long_add(v, &zone->vm_stat[i]);
103744+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
103745 global_diff[i] += v;
103746 #ifdef CONFIG_NUMA
103747 /* 3 seconds idle till flush */
103748@@ -519,7 +520,7 @@ void cpu_vm_stats_fold(int cpu)
103749
103750 v = p->vm_stat_diff[i];
103751 p->vm_stat_diff[i] = 0;
103752- atomic_long_add(v, &zone->vm_stat[i]);
103753+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
103754 global_diff[i] += v;
103755 }
103756 }
103757@@ -539,8 +540,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
103758 if (pset->vm_stat_diff[i]) {
103759 int v = pset->vm_stat_diff[i];
103760 pset->vm_stat_diff[i] = 0;
103761- atomic_long_add(v, &zone->vm_stat[i]);
103762- atomic_long_add(v, &vm_stat[i]);
103763+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
103764+ atomic_long_add_unchecked(v, &vm_stat[i]);
103765 }
103766 }
103767 #endif
103768@@ -1162,10 +1163,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
103769 stat_items_size += sizeof(struct vm_event_state);
103770 #endif
103771
103772- v = kmalloc(stat_items_size, GFP_KERNEL);
103773+ v = kzalloc(stat_items_size, GFP_KERNEL);
103774 m->private = v;
103775 if (!v)
103776 return ERR_PTR(-ENOMEM);
103777+
103778+#ifdef CONFIG_GRKERNSEC_PROC_ADD
103779+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
103780+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
103781+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
103782+ && !in_group_p(grsec_proc_gid)
103783+#endif
103784+ )
103785+ return (unsigned long *)m->private + *pos;
103786+#endif
103787+#endif
103788+
103789 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
103790 v[i] = global_page_state(i);
103791 v += NR_VM_ZONE_STAT_ITEMS;
103792@@ -1314,10 +1327,16 @@ static int __init setup_vmstat(void)
103793 cpu_notifier_register_done();
103794 #endif
103795 #ifdef CONFIG_PROC_FS
103796- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
103797- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
103798- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
103799- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
103800+ {
103801+ mode_t gr_mode = S_IRUGO;
103802+#ifdef CONFIG_GRKERNSEC_PROC_ADD
103803+ gr_mode = S_IRUSR;
103804+#endif
103805+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
103806+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
103807+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
103808+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
103809+ }
103810 #endif
103811 return 0;
103812 }
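/*
 * The vmstat change pairs kzalloc() with an early return: unprivileged
 * readers get the still-zeroed buffer instead of real counters, while
 * root (or the configured group) falls through and sees the data.
 * Condensed userspace sketch of that gate (the privilege check is a
 * stand-in for the GRKERNSEC_PROC_* tests):
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static long *vmstat_snapshot(const long *stats, size_t n)
{
	long *v = calloc(n, sizeof(*v));	/* zeroed, like kzalloc() */

	if (!v)
		return NULL;
	if (geteuid() != 0)
		return v;	/* unprivileged: all-zero counters */

	memcpy(v, stats, n * sizeof(*v));
	return v;
}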
103813diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
103814index 44ebd5c..1f732bae 100644
103815--- a/net/8021q/vlan.c
103816+++ b/net/8021q/vlan.c
103817@@ -475,7 +475,7 @@ out:
103818 return NOTIFY_DONE;
103819 }
103820
103821-static struct notifier_block vlan_notifier_block __read_mostly = {
103822+static struct notifier_block vlan_notifier_block = {
103823 .notifier_call = vlan_device_event,
103824 };
103825
103826@@ -550,8 +550,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
103827 err = -EPERM;
103828 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
103829 break;
103830- if ((args.u.name_type >= 0) &&
103831- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
103832+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
103833 struct vlan_net *vn;
103834
103835 vn = net_generic(net, vlan_net_id);
103836diff --git a/net/9p/client.c b/net/9p/client.c
103837index 0004cba..feba240 100644
103838--- a/net/9p/client.c
103839+++ b/net/9p/client.c
103840@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
103841 len - inline_len);
103842 } else {
103843 err = copy_from_user(ename + inline_len,
103844- uidata, len - inline_len);
103845+ (char __force_user *)uidata, len - inline_len);
103846 if (err) {
103847 err = -EFAULT;
103848 goto out_err;
103849@@ -1571,7 +1571,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
103850 kernel_buf = 1;
103851 indata = data;
103852 } else
103853- indata = (__force char *)udata;
103854+ indata = (__force_kernel char *)udata;
103855 /*
103856 * response header len is 11
103857 * PDU Header(7) + IO Size (4)
103858@@ -1646,7 +1646,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
103859 kernel_buf = 1;
103860 odata = data;
103861 } else
103862- odata = (char *)udata;
103863+ odata = (char __force_kernel *)udata;
103864 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
103865 P9_ZC_HDR_SZ, kernel_buf, "dqd",
103866 fid->fid, offset, rsize);
103867diff --git a/net/9p/mod.c b/net/9p/mod.c
103868index 6ab36ae..6f1841b 100644
103869--- a/net/9p/mod.c
103870+++ b/net/9p/mod.c
103871@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
103872 void v9fs_register_trans(struct p9_trans_module *m)
103873 {
103874 spin_lock(&v9fs_trans_lock);
103875- list_add_tail(&m->list, &v9fs_trans_list);
103876+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
103877 spin_unlock(&v9fs_trans_lock);
103878 }
103879 EXPORT_SYMBOL(v9fs_register_trans);
103880@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
103881 void v9fs_unregister_trans(struct p9_trans_module *m)
103882 {
103883 spin_lock(&v9fs_trans_lock);
103884- list_del_init(&m->list);
103885+ pax_list_del_init((struct list_head *)&m->list);
103886 spin_unlock(&v9fs_trans_lock);
103887 }
103888 EXPORT_SYMBOL(v9fs_unregister_trans);
103889diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
103890index 80d08f6..de63fd1 100644
103891--- a/net/9p/trans_fd.c
103892+++ b/net/9p/trans_fd.c
103893@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
103894 oldfs = get_fs();
103895 set_fs(get_ds());
103896 /* The cast to a user pointer is valid due to the set_fs() */
103897- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
103898+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
103899 set_fs(oldfs);
103900
103901 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
103902diff --git a/net/Kconfig b/net/Kconfig
103903index d92afe4..ab63892 100644
103904--- a/net/Kconfig
103905+++ b/net/Kconfig
103906@@ -89,12 +89,8 @@ config NETWORK_SECMARK
103907 to nfmark, but designated for security purposes.
103908 If you are unsure how to answer this question, answer N.
103909
103910-config NET_PTP_CLASSIFY
103911- def_bool n
103912-
103913 config NETWORK_PHY_TIMESTAMPING
103914 bool "Timestamping in PHY devices"
103915- select NET_PTP_CLASSIFY
103916 help
103917 This allows timestamping of network packets by PHYs with
103918 hardware timestamping capabilities. This option adds some
103919@@ -269,7 +265,7 @@ config BQL
103920 config BPF_JIT
103921 bool "enable BPF Just In Time compiler"
103922 depends on HAVE_BPF_JIT
103923- depends on MODULES
103924+ depends on MODULES && X86
103925 ---help---
103926 Berkeley Packet Filter filtering capabilities are normally handled
103927 by an interpreter. This option allows the kernel to generate a native
103928diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
103929index af46bc4..f9adfcd 100644
103930--- a/net/appletalk/atalk_proc.c
103931+++ b/net/appletalk/atalk_proc.c
103932@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
103933 struct proc_dir_entry *p;
103934 int rc = -ENOMEM;
103935
103936- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
103937+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
103938 if (!atalk_proc_dir)
103939 goto out;
103940
103941diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
103942index 876fbe8..8bbea9f 100644
103943--- a/net/atm/atm_misc.c
103944+++ b/net/atm/atm_misc.c
103945@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
103946 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
103947 return 1;
103948 atm_return(vcc, truesize);
103949- atomic_inc(&vcc->stats->rx_drop);
103950+ atomic_inc_unchecked(&vcc->stats->rx_drop);
103951 return 0;
103952 }
103953 EXPORT_SYMBOL(atm_charge);
103954@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
103955 }
103956 }
103957 atm_return(vcc, guess);
103958- atomic_inc(&vcc->stats->rx_drop);
103959+ atomic_inc_unchecked(&vcc->stats->rx_drop);
103960 return NULL;
103961 }
103962 EXPORT_SYMBOL(atm_alloc_charge);
103963@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
103964
103965 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
103966 {
103967-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
103968+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
103969 __SONET_ITEMS
103970 #undef __HANDLE_ITEM
103971 }
103972@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
103973
103974 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
103975 {
103976-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
103977+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
103978 __SONET_ITEMS
103979 #undef __HANDLE_ITEM
103980 }
103981diff --git a/net/atm/lec.c b/net/atm/lec.c
103982index 4c5b8ba..95f7005 100644
103983--- a/net/atm/lec.c
103984+++ b/net/atm/lec.c
103985@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
103986 }
103987
103988 static struct lane2_ops lane2_ops = {
103989- lane2_resolve, /* resolve, spec 3.1.3 */
103990- lane2_associate_req, /* associate_req, spec 3.1.4 */
103991- NULL /* associate indicator, spec 3.1.5 */
103992+ .resolve = lane2_resolve,
103993+ .associate_req = lane2_associate_req,
103994+ .associate_indicator = NULL
103995 };
103996
103997 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
103998diff --git a/net/atm/lec.h b/net/atm/lec.h
103999index 4149db1..f2ab682 100644
104000--- a/net/atm/lec.h
104001+++ b/net/atm/lec.h
104002@@ -48,7 +48,7 @@ struct lane2_ops {
104003 const u8 *tlvs, u32 sizeoftlvs);
104004 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
104005 const u8 *tlvs, u32 sizeoftlvs);
104006-};
104007+} __no_const;
104008
104009 /*
104010 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
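/*
 * Context for the __no_const tag above: PaX's constify plugin makes
 * function-pointer "ops" structures const so they land in .rodata and
 * cannot be overwritten at runtime; __no_const opts a structure out when
 * it genuinely must stay writable (lane2_ops carries a NULL slot filled in
 * later).  Plain-C version of the protected case:
 */
#include <stdio.h>

struct greet_ops {
	void (*hello)(const char *who);
};

static void do_hello(const char *who)
{
	printf("hello, %s\n", who);
}

/* const table: the pointer lives in read-only data, not writable memory */
static const struct greet_ops greet_ops = {
	.hello = do_hello,
};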
104011diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
104012index d1b2d9a..d549f7f 100644
104013--- a/net/atm/mpoa_caches.c
104014+++ b/net/atm/mpoa_caches.c
104015@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
104016
104017
104018 static struct in_cache_ops ingress_ops = {
104019- in_cache_add_entry, /* add_entry */
104020- in_cache_get, /* get */
104021- in_cache_get_with_mask, /* get_with_mask */
104022- in_cache_get_by_vcc, /* get_by_vcc */
104023- in_cache_put, /* put */
104024- in_cache_remove_entry, /* remove_entry */
104025- cache_hit, /* cache_hit */
104026- clear_count_and_expired, /* clear_count */
104027- check_resolving_entries, /* check_resolving */
104028- refresh_entries, /* refresh */
104029- in_destroy_cache /* destroy_cache */
104030+ .add_entry = in_cache_add_entry,
104031+ .get = in_cache_get,
104032+ .get_with_mask = in_cache_get_with_mask,
104033+ .get_by_vcc = in_cache_get_by_vcc,
104034+ .put = in_cache_put,
104035+ .remove_entry = in_cache_remove_entry,
104036+ .cache_hit = cache_hit,
104037+ .clear_count = clear_count_and_expired,
104038+ .check_resolving = check_resolving_entries,
104039+ .refresh = refresh_entries,
104040+ .destroy_cache = in_destroy_cache
104041 };
104042
104043 static struct eg_cache_ops egress_ops = {
104044- eg_cache_add_entry, /* add_entry */
104045- eg_cache_get_by_cache_id, /* get_by_cache_id */
104046- eg_cache_get_by_tag, /* get_by_tag */
104047- eg_cache_get_by_vcc, /* get_by_vcc */
104048- eg_cache_get_by_src_ip, /* get_by_src_ip */
104049- eg_cache_put, /* put */
104050- eg_cache_remove_entry, /* remove_entry */
104051- update_eg_cache_entry, /* update */
104052- clear_expired, /* clear_expired */
104053- eg_destroy_cache /* destroy_cache */
104054+ .add_entry = eg_cache_add_entry,
104055+ .get_by_cache_id = eg_cache_get_by_cache_id,
104056+ .get_by_tag = eg_cache_get_by_tag,
104057+ .get_by_vcc = eg_cache_get_by_vcc,
104058+ .get_by_src_ip = eg_cache_get_by_src_ip,
104059+ .put = eg_cache_put,
104060+ .remove_entry = eg_cache_remove_entry,
104061+ .update = update_eg_cache_entry,
104062+ .clear_expired = clear_expired,
104063+ .destroy_cache = eg_destroy_cache
104064 };
104065
104066
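/*
 * The mpoa_caches.c rewrite swaps positional struct initializers for
 * designated ones; besides readability, the designated form stays correct
 * if fields are ever reordered (as the constify/randomize plugins may do).
 * Small stand-alone contrast:
 */
struct cache_ops {
	int (*add_entry)(int key);
	int (*remove_entry)(int key);
};

static int add_one(int key)    { return key + 1; }
static int remove_one(int key) { return key - 1; }

/* positional: silently breaks if the fields are ever reordered */
static struct cache_ops legacy_ops = { add_one, remove_one };

/* designated: each pointer is tied to a named field */
static struct cache_ops safe_ops = {
	.add_entry    = add_one,
	.remove_entry = remove_one,
};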
104067diff --git a/net/atm/proc.c b/net/atm/proc.c
104068index bbb6461..cf04016 100644
104069--- a/net/atm/proc.c
104070+++ b/net/atm/proc.c
104071@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
104072 const struct k_atm_aal_stats *stats)
104073 {
104074 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
104075- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
104076- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
104077- atomic_read(&stats->rx_drop));
104078+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
104079+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
104080+ atomic_read_unchecked(&stats->rx_drop));
104081 }
104082
104083 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
104084diff --git a/net/atm/resources.c b/net/atm/resources.c
104085index 0447d5d..3cf4728 100644
104086--- a/net/atm/resources.c
104087+++ b/net/atm/resources.c
104088@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
104089 static void copy_aal_stats(struct k_atm_aal_stats *from,
104090 struct atm_aal_stats *to)
104091 {
104092-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
104093+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
104094 __AAL_STAT_ITEMS
104095 #undef __HANDLE_ITEM
104096 }
104097@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
104098 static void subtract_aal_stats(struct k_atm_aal_stats *from,
104099 struct atm_aal_stats *to)
104100 {
104101-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
104102+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
104103 __AAL_STAT_ITEMS
104104 #undef __HANDLE_ITEM
104105 }
104106diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
104107index 919a5ce..cc6b444 100644
104108--- a/net/ax25/sysctl_net_ax25.c
104109+++ b/net/ax25/sysctl_net_ax25.c
104110@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
104111 {
104112 char path[sizeof("net/ax25/") + IFNAMSIZ];
104113 int k;
104114- struct ctl_table *table;
104115+ ctl_table_no_const *table;
104116
104117 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
104118 if (!table)
104119diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
104120index f04224c..f326579 100644
104121--- a/net/batman-adv/bat_iv_ogm.c
104122+++ b/net/batman-adv/bat_iv_ogm.c
104123@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
104124
104125 /* randomize initial seqno to avoid collision */
104126 get_random_bytes(&random_seqno, sizeof(random_seqno));
104127- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
104128+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
104129
104130 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
104131 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
104132@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
104133 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
104134
104135 /* change sequence number to network order */
104136- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
104137+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
104138 batadv_ogm_packet->seqno = htonl(seqno);
104139- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
104140+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
104141
104142 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
104143
104144@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
104145 return;
104146
104147 /* could be changed by schedule_own_packet() */
104148- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
104149+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
104150
104151 if (ogm_packet->flags & BATADV_DIRECTLINK)
104152 has_directlink_flag = true;
104153diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
104154index 022d18a..919daff 100644
104155--- a/net/batman-adv/fragmentation.c
104156+++ b/net/batman-adv/fragmentation.c
104157@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
104158 frag_header.packet_type = BATADV_UNICAST_FRAG;
104159 frag_header.version = BATADV_COMPAT_VERSION;
104160 frag_header.ttl = BATADV_TTL;
104161- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
104162+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
104163 frag_header.reserved = 0;
104164 frag_header.no = 0;
104165 frag_header.total_size = htons(skb->len);
104166diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
104167index cbd677f..b783347 100644
104168--- a/net/batman-adv/soft-interface.c
104169+++ b/net/batman-adv/soft-interface.c
104170@@ -296,7 +296,7 @@ send:
104171 primary_if->net_dev->dev_addr);
104172
104173 /* set broadcast sequence number */
104174- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
104175+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
104176 bcast_packet->seqno = htonl(seqno);
104177
104178 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
104179@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
104180 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
104181
104182 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
104183- atomic_set(&bat_priv->bcast_seqno, 1);
104184+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
104185 atomic_set(&bat_priv->tt.vn, 0);
104186 atomic_set(&bat_priv->tt.local_changes, 0);
104187 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
104188@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
104189
104190 /* randomize initial seqno to avoid collision */
104191 get_random_bytes(&random_seqno, sizeof(random_seqno));
104192- atomic_set(&bat_priv->frag_seqno, random_seqno);
104193+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
104194
104195 bat_priv->primary_if = NULL;
104196 bat_priv->num_ifaces = 0;
104197diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
104198index 8854c05..ee5d5497 100644
104199--- a/net/batman-adv/types.h
104200+++ b/net/batman-adv/types.h
104201@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
104202 struct batadv_hard_iface_bat_iv {
104203 unsigned char *ogm_buff;
104204 int ogm_buff_len;
104205- atomic_t ogm_seqno;
104206+ atomic_unchecked_t ogm_seqno;
104207 };
104208
104209 /**
104210@@ -768,7 +768,7 @@ struct batadv_priv {
104211 atomic_t bonding;
104212 atomic_t fragmentation;
104213 atomic_t packet_size_max;
104214- atomic_t frag_seqno;
104215+ atomic_unchecked_t frag_seqno;
104216 #ifdef CONFIG_BATMAN_ADV_BLA
104217 atomic_t bridge_loop_avoidance;
104218 #endif
104219@@ -787,7 +787,7 @@ struct batadv_priv {
104220 #endif
104221 uint32_t isolation_mark;
104222 uint32_t isolation_mark_mask;
104223- atomic_t bcast_seqno;
104224+ atomic_unchecked_t bcast_seqno;
104225 atomic_t bcast_queue_left;
104226 atomic_t batman_queue_left;
104227 char num_ifaces;
104228diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
104229index 80d25c1..aa99a98 100644
104230--- a/net/bluetooth/hci_sock.c
104231+++ b/net/bluetooth/hci_sock.c
104232@@ -1044,7 +1044,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
104233 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
104234 }
104235
104236- len = min_t(unsigned int, len, sizeof(uf));
104237+ len = min((size_t)len, sizeof(uf));
104238 if (copy_from_user(&uf, optval, len)) {
104239 err = -EFAULT;
104240 break;
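/*
 * The setsockopt fixes in this area all follow one pattern: keep the
 * user-supplied length in a size_t and clamp it against the destination
 * size *before* copy_from_user(), so a negative or oversized optlen can
 * never become a huge unsigned copy.  Userspace sketch of the clamp
 * (mock option struct; memcpy stands in for copy_from_user):
 */
#include <string.h>
#include <stddef.h>

struct filter_opts {
	unsigned int type_mask;
	unsigned int event_mask[2];
};

static int set_filter(struct filter_opts *dst, const void *optval,
		      size_t optlen)
{
	size_t len = optlen < sizeof(*dst) ? optlen : sizeof(*dst);

	memset(dst, 0, sizeof(*dst));	/* defaults for any short copy */
	memcpy(dst, optval, len);
	return 0;
}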
104241diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
104242index 323f23c..5e27529 100644
104243--- a/net/bluetooth/l2cap_core.c
104244+++ b/net/bluetooth/l2cap_core.c
104245@@ -3548,8 +3548,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
104246 break;
104247
104248 case L2CAP_CONF_RFC:
104249- if (olen == sizeof(rfc))
104250- memcpy(&rfc, (void *)val, olen);
104251+ if (olen != sizeof(rfc))
104252+ break;
104253+
104254+ memcpy(&rfc, (void *)val, olen);
104255
104256 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
104257 rfc.mode != chan->mode)
104258diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
104259index e137869..33f3ebd 100644
104260--- a/net/bluetooth/l2cap_sock.c
104261+++ b/net/bluetooth/l2cap_sock.c
104262@@ -628,7 +628,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
104263 struct sock *sk = sock->sk;
104264 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
104265 struct l2cap_options opts;
104266- int len, err = 0;
104267+ int err = 0;
104268+ size_t len = optlen;
104269 u32 opt;
104270
104271 BT_DBG("sk %p", sk);
104272@@ -655,7 +656,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
104273 opts.max_tx = chan->max_tx;
104274 opts.txwin_size = chan->tx_win;
104275
104276- len = min_t(unsigned int, sizeof(opts), optlen);
104277+ len = min(sizeof(opts), len);
104278 if (copy_from_user((char *) &opts, optval, len)) {
104279 err = -EFAULT;
104280 break;
104281@@ -742,7 +743,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104282 struct bt_security sec;
104283 struct bt_power pwr;
104284 struct l2cap_conn *conn;
104285- int len, err = 0;
104286+ int err = 0;
104287+ size_t len = optlen;
104288 u32 opt;
104289
104290 BT_DBG("sk %p", sk);
104291@@ -766,7 +768,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104292
104293 sec.level = BT_SECURITY_LOW;
104294
104295- len = min_t(unsigned int, sizeof(sec), optlen);
104296+ len = min(sizeof(sec), len);
104297 if (copy_from_user((char *) &sec, optval, len)) {
104298 err = -EFAULT;
104299 break;
104300@@ -861,7 +863,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104301
104302 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
104303
104304- len = min_t(unsigned int, sizeof(pwr), optlen);
104305+ len = min(sizeof(pwr), len);
104306 if (copy_from_user((char *) &pwr, optval, len)) {
104307 err = -EFAULT;
104308 break;
104309diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
104310index c603a5e..7f08991 100644
104311--- a/net/bluetooth/rfcomm/sock.c
104312+++ b/net/bluetooth/rfcomm/sock.c
104313@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
104314 struct sock *sk = sock->sk;
104315 struct bt_security sec;
104316 int err = 0;
104317- size_t len;
104318+ size_t len = optlen;
104319 u32 opt;
104320
104321 BT_DBG("sk %p", sk);
104322@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
104323
104324 sec.level = BT_SECURITY_LOW;
104325
104326- len = min_t(unsigned int, sizeof(sec), optlen);
104327+ len = min(sizeof(sec), len);
104328 if (copy_from_user((char *) &sec, optval, len)) {
104329 err = -EFAULT;
104330 break;
104331diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
104332index 8e385a0..a5bdd8e 100644
104333--- a/net/bluetooth/rfcomm/tty.c
104334+++ b/net/bluetooth/rfcomm/tty.c
104335@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
104336 BT_DBG("tty %p id %d", tty, tty->index);
104337
104338 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
104339- dev->channel, dev->port.count);
104340+ dev->channel, atomic_read(&dev->port.count));
104341
104342 err = tty_port_open(&dev->port, tty, filp);
104343 if (err)
104344@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
104345 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
104346
104347 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
104348- dev->port.count);
104349+ atomic_read(&dev->port.count));
104350
104351 tty_port_close(&dev->port, tty, filp);
104352 }
104353diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
104354index 1059ed3..d70846a 100644
104355--- a/net/bridge/netfilter/ebtables.c
104356+++ b/net/bridge/netfilter/ebtables.c
104357@@ -1524,7 +1524,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104358 tmp.valid_hooks = t->table->valid_hooks;
104359 }
104360 mutex_unlock(&ebt_mutex);
104361- if (copy_to_user(user, &tmp, *len) != 0) {
104362+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104363 BUGPRINT("c2u Didn't work\n");
104364 ret = -EFAULT;
104365 break;
104366@@ -2330,7 +2330,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
104367 goto out;
104368 tmp.valid_hooks = t->valid_hooks;
104369
104370- if (copy_to_user(user, &tmp, *len) != 0) {
104371+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104372 ret = -EFAULT;
104373 break;
104374 }
104375@@ -2341,7 +2341,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
104376 tmp.entries_size = t->table->entries_size;
104377 tmp.valid_hooks = t->table->valid_hooks;
104378
104379- if (copy_to_user(user, &tmp, *len) != 0) {
104380+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104381 ret = -EFAULT;
104382 break;
104383 }
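/*
 * The ebtables fix bounds the reply copy: *len is caller-controlled, so
 * copying *len bytes out of a fixed-size stack struct would leak adjacent
 * kernel stack.  Sketch of the guarded copy-out (memcpy stands in for
 * copy_to_user):
 */
#include <string.h>
#include <stddef.h>
#include <errno.h>

static int copy_reply(void *user_buf, size_t user_len,
		      const void *obj, size_t obj_size)
{
	if (user_len > obj_size)
		return -EFAULT;	/* never read past the kernel object */

	memcpy(user_buf, obj, user_len);
	return 0;
}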
104384diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
104385index 0f45522..dab651f 100644
104386--- a/net/caif/cfctrl.c
104387+++ b/net/caif/cfctrl.c
104388@@ -10,6 +10,7 @@
104389 #include <linux/spinlock.h>
104390 #include <linux/slab.h>
104391 #include <linux/pkt_sched.h>
104392+#include <linux/sched.h>
104393 #include <net/caif/caif_layer.h>
104394 #include <net/caif/cfpkt.h>
104395 #include <net/caif/cfctrl.h>
104396@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
104397 memset(&dev_info, 0, sizeof(dev_info));
104398 dev_info.id = 0xff;
104399 cfsrvl_init(&this->serv, 0, &dev_info, false);
104400- atomic_set(&this->req_seq_no, 1);
104401- atomic_set(&this->rsp_seq_no, 1);
104402+ atomic_set_unchecked(&this->req_seq_no, 1);
104403+ atomic_set_unchecked(&this->rsp_seq_no, 1);
104404 this->serv.layer.receive = cfctrl_recv;
104405 sprintf(this->serv.layer.name, "ctrl");
104406 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
104407@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
104408 struct cfctrl_request_info *req)
104409 {
104410 spin_lock_bh(&ctrl->info_list_lock);
104411- atomic_inc(&ctrl->req_seq_no);
104412- req->sequence_no = atomic_read(&ctrl->req_seq_no);
104413+ atomic_inc_unchecked(&ctrl->req_seq_no);
104414+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
104415 list_add_tail(&req->list, &ctrl->list);
104416 spin_unlock_bh(&ctrl->info_list_lock);
104417 }
104418@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
104419 if (p != first)
104420 pr_warn("Requests are not received in order\n");
104421
104422- atomic_set(&ctrl->rsp_seq_no,
104423+ atomic_set_unchecked(&ctrl->rsp_seq_no,
104424 p->sequence_no);
104425 list_del(&p->list);
104426 goto out;
104427diff --git a/net/can/af_can.c b/net/can/af_can.c
104428index ce82337..5d17b4d 100644
104429--- a/net/can/af_can.c
104430+++ b/net/can/af_can.c
104431@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
104432 };
104433
104434 /* notifier block for netdevice event */
104435-static struct notifier_block can_netdev_notifier __read_mostly = {
104436+static struct notifier_block can_netdev_notifier = {
104437 .notifier_call = can_notifier,
104438 };
104439
104440diff --git a/net/can/bcm.c b/net/can/bcm.c
104441index dcb75c0..24b1b43 100644
104442--- a/net/can/bcm.c
104443+++ b/net/can/bcm.c
104444@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
104445 }
104446
104447 /* create /proc/net/can-bcm directory */
104448- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
104449+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
104450 return 0;
104451 }
104452
104453diff --git a/net/can/gw.c b/net/can/gw.c
104454index 050a211..bb9fe33 100644
104455--- a/net/can/gw.c
104456+++ b/net/can/gw.c
104457@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
104458 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
104459
104460 static HLIST_HEAD(cgw_list);
104461-static struct notifier_block notifier;
104462
104463 static struct kmem_cache *cgw_cache __read_mostly;
104464
104465@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
104466 return err;
104467 }
104468
104469+static struct notifier_block notifier = {
104470+ .notifier_call = cgw_notifier
104471+};
104472+
104473 static __init int cgw_module_init(void)
104474 {
104475 /* sanitize given module parameter */
104476@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
104477 return -ENOMEM;
104478
104479 /* set notifier */
104480- notifier.notifier_call = cgw_notifier;
104481 register_netdevice_notifier(&notifier);
104482
104483 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
104484diff --git a/net/can/proc.c b/net/can/proc.c
104485index 1a19b98..df2b4ec 100644
104486--- a/net/can/proc.c
104487+++ b/net/can/proc.c
104488@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
104489 void can_init_proc(void)
104490 {
104491 /* create /proc/net/can directory */
104492- can_dir = proc_mkdir("can", init_net.proc_net);
104493+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
104494
104495 if (!can_dir) {
104496 printk(KERN_INFO "can: failed to create /proc/net/can . "
104497diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
104498index 1948d59..9e854d5 100644
104499--- a/net/ceph/messenger.c
104500+++ b/net/ceph/messenger.c
104501@@ -187,7 +187,7 @@ static void con_fault(struct ceph_connection *con);
104502 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
104503
104504 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
104505-static atomic_t addr_str_seq = ATOMIC_INIT(0);
104506+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
104507
104508 static struct page *zero_page; /* used in certain error cases */
104509
104510@@ -198,7 +198,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
104511 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
104512 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
104513
104514- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
104515+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
104516 s = addr_str[i];
104517
104518 switch (ss->ss_family) {
104519diff --git a/net/compat.c b/net/compat.c
104520index bc8aeef..f9c070c 100644
104521--- a/net/compat.c
104522+++ b/net/compat.c
104523@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
104524 return -EFAULT;
104525 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
104526 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
104527- kmsg->msg_name = compat_ptr(tmp1);
104528- kmsg->msg_iov = compat_ptr(tmp2);
104529- kmsg->msg_control = compat_ptr(tmp3);
104530+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
104531+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
104532+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
104533 return 0;
104534 }
104535
104536@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
104537
104538 if (kern_msg->msg_name && kern_msg->msg_namelen) {
104539 if (mode == VERIFY_READ) {
104540- int err = move_addr_to_kernel(kern_msg->msg_name,
104541+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
104542 kern_msg->msg_namelen,
104543 kern_address);
104544 if (err < 0)
104545@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
104546 }
104547
104548 tot_len = iov_from_user_compat_to_kern(kern_iov,
104549- (struct compat_iovec __user *)kern_msg->msg_iov,
104550+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
104551 kern_msg->msg_iovlen);
104552 if (tot_len >= 0)
104553 kern_msg->msg_iov = kern_iov;
104554@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
104555
104556 #define CMSG_COMPAT_FIRSTHDR(msg) \
104557 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
104558- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
104559+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
104560 (struct compat_cmsghdr __user *)NULL)
104561
104562 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
104563 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
104564 (ucmlen) <= (unsigned long) \
104565 ((mhdr)->msg_controllen - \
104566- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
104567+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
104568
104569 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
104570 struct compat_cmsghdr __user *cmsg, int cmsg_len)
104571 {
104572 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
104573- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
104574+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
104575 msg->msg_controllen)
104576 return NULL;
104577 return (struct compat_cmsghdr __user *)ptr;
104578@@ -223,7 +223,7 @@ Efault:
104579
104580 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
104581 {
104582- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
104583+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
104584 struct compat_cmsghdr cmhdr;
104585 struct compat_timeval ctv;
104586 struct compat_timespec cts[3];
104587@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
104588
104589 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
104590 {
104591- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
104592+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
104593 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
104594 int fdnum = scm->fp->count;
104595 struct file **fp = scm->fp->fp;
104596@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
104597 return -EFAULT;
104598 old_fs = get_fs();
104599 set_fs(KERNEL_DS);
104600- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
104601+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
104602 set_fs(old_fs);
104603
104604 return err;
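
do_set_sock_timeout() reuses sock_setsockopt(), which takes a __user pointer, by temporarily widening the address limit with set_fs(KERNEL_DS) and feeding it a kernel buffer; under the stricter annotations the kernel pointer must then be force-cast to the user address space, which is all the hunk above changes. A userspace sketch of the (since removed upstream) get_fs()/set_fs() mechanism, using a toy address limit:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified model: access_ok() compares a pointer against a per-task
     * limit; KERNEL_DS raises the limit so kernel buffers pass the check. */
    #define USER_DS   0x10000UL    /* toy limit for the sketch */
    #define KERNEL_DS UINTPTR_MAX

    static uintptr_t addr_limit = USER_DS;

    static uintptr_t get_fs(void) { return addr_limit; }
    static void set_fs(uintptr_t fs) { addr_limit = fs; }

    static int access_ok(const void *p, size_t n)
    {
        return (uintptr_t)p + n <= addr_limit;
    }

    /* Stands in for sock_setsockopt(): accepts only "user" pointers. */
    static int set_opt(const void *uptr, size_t len, char *out)
    {
        if (!access_ok(uptr, len))
            return -14;    /* -EFAULT */
        memcpy(out, uptr, len);
        return 0;
    }

    int main(void)
    {
        char kbuf[8] = "timeout", out[8] = "";
        uintptr_t old_fs = get_fs();
        int err;

        set_fs(KERNEL_DS);    /* let the kernel buffer pass access_ok() */
        err = set_opt(kbuf, sizeof(kbuf), out);
        set_fs(old_fs);       /* always restore the previous limit */
        printf("err=%d out=%s\n", err, out);
        return 0;
    }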
104605@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
104606 len = sizeof(ktime);
104607 old_fs = get_fs();
104608 set_fs(KERNEL_DS);
104609- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
104610+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
104611 set_fs(old_fs);
104612
104613 if (!err) {
104614@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
104615 case MCAST_JOIN_GROUP:
104616 case MCAST_LEAVE_GROUP:
104617 {
104618- struct compat_group_req __user *gr32 = (void *)optval;
104619+ struct compat_group_req __user *gr32 = (void __user *)optval;
104620 struct group_req __user *kgr =
104621 compat_alloc_user_space(sizeof(struct group_req));
104622 u32 interface;
104623@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
104624 case MCAST_BLOCK_SOURCE:
104625 case MCAST_UNBLOCK_SOURCE:
104626 {
104627- struct compat_group_source_req __user *gsr32 = (void *)optval;
104628+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
104629 struct group_source_req __user *kgsr = compat_alloc_user_space(
104630 sizeof(struct group_source_req));
104631 u32 interface;
104632@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
104633 }
104634 case MCAST_MSFILTER:
104635 {
104636- struct compat_group_filter __user *gf32 = (void *)optval;
104637+ struct compat_group_filter __user *gf32 = (void __user *)optval;
104638 struct group_filter __user *kgf;
104639 u32 interface, fmode, numsrc;
104640
104641@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
104642 char __user *optval, int __user *optlen,
104643 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
104644 {
104645- struct compat_group_filter __user *gf32 = (void *)optval;
104646+ struct compat_group_filter __user *gf32 = (void __user *)optval;
104647 struct group_filter __user *kgf;
104648 int __user *koptlen;
104649 u32 interface, fmode, numsrc;
104650@@ -804,7 +804,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
104651
104652 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
104653 return -EINVAL;
104654- if (copy_from_user(a, args, nas[call]))
104655+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
104656 return -EFAULT;
104657 a0 = a[0];
104658 a1 = a[1];
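
The one-line change to compat_sys_socketcall() above is defense in depth: nas[] holds the argument byte count for each multiplexed socket call, and bounding the copy_from_user() length against the on-stack destination array keeps a wrong table entry, or any future widening of it, from becoming a stack overflow. A userspace sketch (table values are illustrative, patterned on the kernel's nas[]):

    #include <stdio.h>
    #include <string.h>

    #define AL(x) ((x) * sizeof(unsigned int))
    static const unsigned char nas[21] = {
        AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3),
        AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5),
        AL(3), AL(3), AL(4), AL(5), AL(4)
    };

    static int do_socketcall(int call, const unsigned int *args)
    {
        unsigned int a[6];

        if (call < 1 || call > 20)       /* SYS_SOCKET..SYS_SENDMMSG */
            return -22;                  /* -EINVAL */
        if (nas[call] > sizeof(a))       /* the added hardening check */
            return -14;                  /* -EFAULT */
        memcpy(a, args, nas[call]);      /* models copy_from_user() */
        printf("call %d copies %d bytes\n", call, nas[call]);
        return 0;
    }

    int main(void)
    {
        unsigned int args[6] = { 0 };

        return do_socketcall(11 /* SYS_SENDTO */, args);
    }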
104659diff --git a/net/core/Makefile b/net/core/Makefile
104660index 71093d9..a8a035b 100644
104661--- a/net/core/Makefile
104662+++ b/net/core/Makefile
104663@@ -21,6 +21,5 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
104664 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
104665 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
104666 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
104667-obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
104668 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
104669 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
104670diff --git a/net/core/datagram.c b/net/core/datagram.c
104671index 488dd1a..7179f0f 100644
104672--- a/net/core/datagram.c
104673+++ b/net/core/datagram.c
104674@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
104675 }
104676
104677 kfree_skb(skb);
104678- atomic_inc(&sk->sk_drops);
104679+ atomic_inc_unchecked(&sk->sk_drops);
104680 sk_mem_reclaim_partial(sk);
104681
104682 return err;
104683diff --git a/net/core/dev.c b/net/core/dev.c
104684index 367a586..ef2fe17 100644
104685--- a/net/core/dev.c
104686+++ b/net/core/dev.c
104687@@ -1672,14 +1672,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
104688 {
104689 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
104690 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
104691- atomic_long_inc(&dev->rx_dropped);
104692+ atomic_long_inc_unchecked(&dev->rx_dropped);
104693 kfree_skb(skb);
104694 return NET_RX_DROP;
104695 }
104696 }
104697
104698 if (unlikely(!is_skb_forwardable(dev, skb))) {
104699- atomic_long_inc(&dev->rx_dropped);
104700+ atomic_long_inc_unchecked(&dev->rx_dropped);
104701 kfree_skb(skb);
104702 return NET_RX_DROP;
104703 }
104704@@ -2476,7 +2476,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
104705
104706 struct dev_gso_cb {
104707 void (*destructor)(struct sk_buff *skb);
104708-};
104709+} __no_const;
104710
104711 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
104712
104713@@ -2932,7 +2932,7 @@ recursion_alert:
104714 rc = -ENETDOWN;
104715 rcu_read_unlock_bh();
104716
104717- atomic_long_inc(&dev->tx_dropped);
104718+ atomic_long_inc_unchecked(&dev->tx_dropped);
104719 kfree_skb(skb);
104720 return rc;
104721 out:
104722@@ -3276,7 +3276,7 @@ enqueue:
104723
104724 local_irq_restore(flags);
104725
104726- atomic_long_inc(&skb->dev->rx_dropped);
104727+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
104728 kfree_skb(skb);
104729 return NET_RX_DROP;
104730 }
104731@@ -3353,7 +3353,7 @@ int netif_rx_ni(struct sk_buff *skb)
104732 }
104733 EXPORT_SYMBOL(netif_rx_ni);
104734
104735-static void net_tx_action(struct softirq_action *h)
104736+static __latent_entropy void net_tx_action(void)
104737 {
104738 struct softnet_data *sd = &__get_cpu_var(softnet_data);
104739
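
__latent_entropy marks net_tx_action() (and net_rx_action() below) for grsecurity's latent_entropy gcc plugin, which makes frequently and unpredictably executed functions mix a little state into an entropy pool as a boot-time randomness supplement; the prototype also loses the unused struct softirq_action argument because the hardened tree's softirq dispatch invokes handlers without one. A hand-written sketch of the effect the plugin injects automatically (constants and names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t latent_entropy_pool;

    /* The plugin inserts a perturbation like this into each instrumented
     * function, folding in a per-function compile-time random constant. */
    static void mix_latent_entropy(uint64_t v)
    {
        latent_entropy_pool = latent_entropy_pool * 6364136223846793005ULL + v;
    }

    static void net_tx_action(void)    /* was: (struct softirq_action *h) */
    {
        static uint64_t invocations;

        mix_latent_entropy(0x9e3779b97f4a7c15ULL ^ invocations++);
        /* ... process the per-cpu transmit completions ... */
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            net_tx_action();
        printf("pool=%llx\n", (unsigned long long)latent_entropy_pool);
        return 0;
    }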
104740@@ -3686,7 +3686,7 @@ ncls:
104741 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
104742 } else {
104743 drop:
104744- atomic_long_inc(&skb->dev->rx_dropped);
104745+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
104746 kfree_skb(skb);
104747 /* Jamal, now you will not be able to escape explaining
104748 * to me how you were going to use this. :-)
104749@@ -4406,7 +4406,7 @@ void netif_napi_del(struct napi_struct *napi)
104750 }
104751 EXPORT_SYMBOL(netif_napi_del);
104752
104753-static void net_rx_action(struct softirq_action *h)
104754+static __latent_entropy void net_rx_action(void)
104755 {
104756 struct softnet_data *sd = &__get_cpu_var(softnet_data);
104757 unsigned long time_limit = jiffies + 2;
104758@@ -6403,8 +6403,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
104759 } else {
104760 netdev_stats_to_stats64(storage, &dev->stats);
104761 }
104762- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
104763- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
104764+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
104765+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
104766 return storage;
104767 }
104768 EXPORT_SYMBOL(dev_get_stats);
104769diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
104770index cf999e0..c59a975 100644
104771--- a/net/core/dev_ioctl.c
104772+++ b/net/core/dev_ioctl.c
104773@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
104774 if (no_module && capable(CAP_NET_ADMIN))
104775 no_module = request_module("netdev-%s", name);
104776 if (no_module && capable(CAP_SYS_MODULE)) {
104777+#ifdef CONFIG_GRKERNSEC_MODHARDEN
104778+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
104779+#else
104780 if (!request_module("%s", name))
104781 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
104782 name);
104783+#endif
104784 }
104785 }
104786 EXPORT_SYMBOL(dev_load);
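
CONFIG_GRKERNSEC_MODHARDEN narrows kernel-initiated module auto-loading: rather than honoring a raw "%s" request whose name an unprivileged user controls (here, via an interface name), the request is issued under a grsec_modharden_netdev prefix, so only modules that deliberately declare the hardened alias can be pulled in this way. A toy model of the prefixing (helper names are hypothetical):

    #include <stdio.h>

    /* Toy model: routing auto-load requests through a distinguishing
     * prefix confines user-influenced strings to an opt-in namespace. */
    static int modprobe(const char *alias)
    {
        printf("would load module alias: %s\n", alias);
        return 0;
    }

    static int request_module_hardened(const char *prefix, const char *name)
    {
        char alias[128];

        snprintf(alias, sizeof(alias), "%s_%s", prefix, name);
        return modprobe(alias);
    }

    int main(void)
    {
        /* A name like "eth0" can no longer resolve to an arbitrary
         * module; it only matches grsec_modharden_netdev_* aliases. */
        return request_module_hardened("grsec_modharden_netdev", "eth0");
    }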
104787diff --git a/net/core/filter.c b/net/core/filter.c
104788index 1dbf646..0f95703 100644
104789--- a/net/core/filter.c
104790+++ b/net/core/filter.c
104791@@ -1,16 +1,11 @@
104792 /*
104793 * Linux Socket Filter - Kernel level socket filtering
104794 *
104795- * Based on the design of the Berkeley Packet Filter. The new
104796- * internal format has been designed by PLUMgrid:
104797+ * Author:
104798+ * Jay Schulist <jschlst@samba.org>
104799 *
104800- * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
104801- *
104802- * Authors:
104803- *
104804- * Jay Schulist <jschlst@samba.org>
104805- * Alexei Starovoitov <ast@plumgrid.com>
104806- * Daniel Borkmann <dborkman@redhat.com>
104807+ * Based on the design of:
104808+ * - The Berkeley Packet Filter
104809 *
104810 * This program is free software; you can redistribute it and/or
104811 * modify it under the terms of the GNU General Public License
104812@@ -45,27 +40,6 @@
104813 #include <linux/seccomp.h>
104814 #include <linux/if_vlan.h>
104815
104816-/* Registers */
104817-#define BPF_R0 regs[BPF_REG_0]
104818-#define BPF_R1 regs[BPF_REG_1]
104819-#define BPF_R2 regs[BPF_REG_2]
104820-#define BPF_R3 regs[BPF_REG_3]
104821-#define BPF_R4 regs[BPF_REG_4]
104822-#define BPF_R5 regs[BPF_REG_5]
104823-#define BPF_R6 regs[BPF_REG_6]
104824-#define BPF_R7 regs[BPF_REG_7]
104825-#define BPF_R8 regs[BPF_REG_8]
104826-#define BPF_R9 regs[BPF_REG_9]
104827-#define BPF_R10 regs[BPF_REG_10]
104828-
104829-/* Named registers */
104830-#define DST regs[insn->dst_reg]
104831-#define SRC regs[insn->src_reg]
104832-#define FP regs[BPF_REG_FP]
104833-#define ARG1 regs[BPF_REG_ARG1]
104834-#define CTX regs[BPF_REG_CTX]
104835-#define IMM insn->imm
104836-
104837 /* No hurry in this branch
104838 *
104839 * Exported for the bpf jit load helper.
104840@@ -78,9 +52,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
104841 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
104842 else if (k >= SKF_LL_OFF)
104843 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
104844+
104845 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
104846 return ptr;
104847-
104848 return NULL;
104849 }
104850
104851@@ -89,7 +63,6 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
104852 {
104853 if (k >= 0)
104854 return skb_header_pointer(skb, k, size, buffer);
104855-
104856 return bpf_internal_load_pointer_neg_helper(skb, k, size);
104857 }
104858
104859@@ -135,960 +108,309 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
104860 }
104861 EXPORT_SYMBOL(sk_filter);
104862
104863-/* Base function for offset calculation. Needs to go into .text section,
104864- * therefore keeping it non-static as well; will also be used by JITs
104865- * anyway later on, so do not let the compiler omit it.
104866- */
104867-noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
104868-{
104869- return 0;
104870-}
104871-
104872 /**
104873- * __sk_run_filter - run a filter on a given context
104874- * @ctx: buffer to run the filter on
104875- * @insn: filter to apply
104876+ * sk_run_filter - run a filter on a socket
104877+ * @skb: buffer to run the filter on
104878+ * @fentry: filter to apply
104879 *
104880- * Decode and apply filter instructions to the skb->data. Return length to
104881- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
104882- * array of filter instructions.
104883+ * Decode and apply filter instructions to the skb->data.
104884+ * Return length to keep, 0 for none. @skb is the data we are
104885+ * filtering, @filter is the array of filter instructions.
104886+ * Because all jumps are guaranteed to be before last instruction,
104887+ * and last instruction guaranteed to be a RET, we dont need to check
104888+ * flen. (We used to pass to this function the length of filter)
104889 */
104890-static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
104891+unsigned int sk_run_filter(const struct sk_buff *skb,
104892+ const struct sock_filter *fentry)
104893 {
104894- u64 stack[MAX_BPF_STACK / sizeof(u64)];
104895- u64 regs[MAX_BPF_REG], tmp;
104896- static const void *jumptable[256] = {
104897- [0 ... 255] = &&default_label,
104898- /* Now overwrite non-defaults ... */
104899- /* 32 bit ALU operations */
104900- [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
104901- [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
104902- [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
104903- [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
104904- [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
104905- [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
104906- [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
104907- [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
104908- [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
104909- [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
104910- [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
104911- [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
104912- [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
104913- [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
104914- [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
104915- [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
104916- [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
104917- [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
104918- [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
104919- [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
104920- [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
104921- [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
104922- [BPF_ALU | BPF_NEG] = &&ALU_NEG,
104923- [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
104924- [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
104925- /* 64 bit ALU operations */
104926- [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
104927- [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
104928- [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
104929- [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
104930- [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
104931- [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
104932- [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
104933- [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
104934- [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
104935- [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
104936- [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
104937- [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
104938- [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
104939- [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
104940- [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
104941- [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
104942- [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
104943- [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
104944- [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
104945- [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
104946- [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
104947- [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
104948- [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
104949- [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
104950- [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
104951- /* Call instruction */
104952- [BPF_JMP | BPF_CALL] = &&JMP_CALL,
104953- /* Jumps */
104954- [BPF_JMP | BPF_JA] = &&JMP_JA,
104955- [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
104956- [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
104957- [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
104958- [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
104959- [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
104960- [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
104961- [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
104962- [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
104963- [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
104964- [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
104965- [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
104966- [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
104967- [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
104968- [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
104969- /* Program return */
104970- [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
104971- /* Store instructions */
104972- [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
104973- [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
104974- [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
104975- [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
104976- [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
104977- [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
104978- [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
104979- [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
104980- [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
104981- [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
104982- /* Load instructions */
104983- [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
104984- [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
104985- [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
104986- [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
104987- [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
104988- [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
104989- [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
104990- [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
104991- [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
104992- [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
104993- };
104994 void *ptr;
104995- int off;
104996-
104997-#define CONT ({ insn++; goto select_insn; })
104998-#define CONT_JMP ({ insn++; goto select_insn; })
104999-
105000- FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
105001- ARG1 = (u64) (unsigned long) ctx;
105002-
105003- /* Registers used in classic BPF programs need to be reset first. */
105004- regs[BPF_REG_A] = 0;
105005- regs[BPF_REG_X] = 0;
105006-
105007-select_insn:
105008- goto *jumptable[insn->code];
105009-
105010- /* ALU */
105011-#define ALU(OPCODE, OP) \
105012- ALU64_##OPCODE##_X: \
105013- DST = DST OP SRC; \
105014- CONT; \
105015- ALU_##OPCODE##_X: \
105016- DST = (u32) DST OP (u32) SRC; \
105017- CONT; \
105018- ALU64_##OPCODE##_K: \
105019- DST = DST OP IMM; \
105020- CONT; \
105021- ALU_##OPCODE##_K: \
105022- DST = (u32) DST OP (u32) IMM; \
105023- CONT;
105024-
105025- ALU(ADD, +)
105026- ALU(SUB, -)
105027- ALU(AND, &)
105028- ALU(OR, |)
105029- ALU(LSH, <<)
105030- ALU(RSH, >>)
105031- ALU(XOR, ^)
105032- ALU(MUL, *)
105033-#undef ALU
105034- ALU_NEG:
105035- DST = (u32) -DST;
105036- CONT;
105037- ALU64_NEG:
105038- DST = -DST;
105039- CONT;
105040- ALU_MOV_X:
105041- DST = (u32) SRC;
105042- CONT;
105043- ALU_MOV_K:
105044- DST = (u32) IMM;
105045- CONT;
105046- ALU64_MOV_X:
105047- DST = SRC;
105048- CONT;
105049- ALU64_MOV_K:
105050- DST = IMM;
105051- CONT;
105052- ALU64_ARSH_X:
105053- (*(s64 *) &DST) >>= SRC;
105054- CONT;
105055- ALU64_ARSH_K:
105056- (*(s64 *) &DST) >>= IMM;
105057- CONT;
105058- ALU64_MOD_X:
105059- if (unlikely(SRC == 0))
105060- return 0;
105061- tmp = DST;
105062- DST = do_div(tmp, SRC);
105063- CONT;
105064- ALU_MOD_X:
105065- if (unlikely(SRC == 0))
105066- return 0;
105067- tmp = (u32) DST;
105068- DST = do_div(tmp, (u32) SRC);
105069- CONT;
105070- ALU64_MOD_K:
105071- tmp = DST;
105072- DST = do_div(tmp, IMM);
105073- CONT;
105074- ALU_MOD_K:
105075- tmp = (u32) DST;
105076- DST = do_div(tmp, (u32) IMM);
105077- CONT;
105078- ALU64_DIV_X:
105079- if (unlikely(SRC == 0))
105080- return 0;
105081- do_div(DST, SRC);
105082- CONT;
105083- ALU_DIV_X:
105084- if (unlikely(SRC == 0))
105085- return 0;
105086- tmp = (u32) DST;
105087- do_div(tmp, (u32) SRC);
105088- DST = (u32) tmp;
105089- CONT;
105090- ALU64_DIV_K:
105091- do_div(DST, IMM);
105092- CONT;
105093- ALU_DIV_K:
105094- tmp = (u32) DST;
105095- do_div(tmp, (u32) IMM);
105096- DST = (u32) tmp;
105097- CONT;
105098- ALU_END_TO_BE:
105099- switch (IMM) {
105100- case 16:
105101- DST = (__force u16) cpu_to_be16(DST);
105102- break;
105103- case 32:
105104- DST = (__force u32) cpu_to_be32(DST);
105105- break;
105106- case 64:
105107- DST = (__force u64) cpu_to_be64(DST);
105108- break;
105109- }
105110- CONT;
105111- ALU_END_TO_LE:
105112- switch (IMM) {
105113- case 16:
105114- DST = (__force u16) cpu_to_le16(DST);
105115- break;
105116- case 32:
105117- DST = (__force u32) cpu_to_le32(DST);
105118- break;
105119- case 64:
105120- DST = (__force u64) cpu_to_le64(DST);
105121- break;
105122- }
105123- CONT;
105124-
105125- /* CALL */
105126- JMP_CALL:
105127- /* Function call scratches BPF_R1-BPF_R5 registers,
105128- * preserves BPF_R6-BPF_R9, and stores return value
105129- * into BPF_R0.
105130- */
105131- BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
105132- BPF_R4, BPF_R5);
105133- CONT;
105134-
105135- /* JMP */
105136- JMP_JA:
105137- insn += insn->off;
105138- CONT;
105139- JMP_JEQ_X:
105140- if (DST == SRC) {
105141- insn += insn->off;
105142- CONT_JMP;
105143- }
105144- CONT;
105145- JMP_JEQ_K:
105146- if (DST == IMM) {
105147- insn += insn->off;
105148- CONT_JMP;
105149- }
105150- CONT;
105151- JMP_JNE_X:
105152- if (DST != SRC) {
105153- insn += insn->off;
105154- CONT_JMP;
105155- }
105156- CONT;
105157- JMP_JNE_K:
105158- if (DST != IMM) {
105159- insn += insn->off;
105160- CONT_JMP;
105161- }
105162- CONT;
105163- JMP_JGT_X:
105164- if (DST > SRC) {
105165- insn += insn->off;
105166- CONT_JMP;
105167- }
105168- CONT;
105169- JMP_JGT_K:
105170- if (DST > IMM) {
105171- insn += insn->off;
105172- CONT_JMP;
105173- }
105174- CONT;
105175- JMP_JGE_X:
105176- if (DST >= SRC) {
105177- insn += insn->off;
105178- CONT_JMP;
105179- }
105180- CONT;
105181- JMP_JGE_K:
105182- if (DST >= IMM) {
105183- insn += insn->off;
105184- CONT_JMP;
105185- }
105186- CONT;
105187- JMP_JSGT_X:
105188- if (((s64) DST) > ((s64) SRC)) {
105189- insn += insn->off;
105190- CONT_JMP;
105191- }
105192- CONT;
105193- JMP_JSGT_K:
105194- if (((s64) DST) > ((s64) IMM)) {
105195- insn += insn->off;
105196- CONT_JMP;
105197- }
105198- CONT;
105199- JMP_JSGE_X:
105200- if (((s64) DST) >= ((s64) SRC)) {
105201- insn += insn->off;
105202- CONT_JMP;
105203- }
105204- CONT;
105205- JMP_JSGE_K:
105206- if (((s64) DST) >= ((s64) IMM)) {
105207- insn += insn->off;
105208- CONT_JMP;
105209- }
105210- CONT;
105211- JMP_JSET_X:
105212- if (DST & SRC) {
105213- insn += insn->off;
105214- CONT_JMP;
105215- }
105216- CONT;
105217- JMP_JSET_K:
105218- if (DST & IMM) {
105219- insn += insn->off;
105220- CONT_JMP;
105221- }
105222- CONT;
105223- JMP_EXIT:
105224- return BPF_R0;
105225-
105226- /* STX and ST and LDX*/
105227-#define LDST(SIZEOP, SIZE) \
105228- STX_MEM_##SIZEOP: \
105229- *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
105230- CONT; \
105231- ST_MEM_##SIZEOP: \
105232- *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
105233- CONT; \
105234- LDX_MEM_##SIZEOP: \
105235- DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
105236- CONT;
105237-
105238- LDST(B, u8)
105239- LDST(H, u16)
105240- LDST(W, u32)
105241- LDST(DW, u64)
105242-#undef LDST
105243- STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
105244- atomic_add((u32) SRC, (atomic_t *)(unsigned long)
105245- (DST + insn->off));
105246- CONT;
105247- STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
105248- atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
105249- (DST + insn->off));
105250- CONT;
105251- LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
105252- off = IMM;
105253-load_word:
105254- /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
105255- * only appearing in the programs where ctx ==
105256- * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
105257- * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
105258- * internal BPF verifier will check that BPF_R6 ==
105259- * ctx.
105260- *
105261- * BPF_ABS and BPF_IND are wrappers of function calls,
105262- * so they scratch BPF_R1-BPF_R5 registers, preserve
105263- * BPF_R6-BPF_R9, and store return value into BPF_R0.
105264- *
105265- * Implicit input:
105266- * ctx == skb == BPF_R6 == CTX
105267- *
105268- * Explicit input:
105269- * SRC == any register
105270- * IMM == 32-bit immediate
105271- *
105272- * Output:
105273- * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
105274- */
105275-
105276- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
105277- if (likely(ptr != NULL)) {
105278- BPF_R0 = get_unaligned_be32(ptr);
105279- CONT;
105280- }
105281-
105282- return 0;
105283- LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
105284- off = IMM;
105285-load_half:
105286- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
105287- if (likely(ptr != NULL)) {
105288- BPF_R0 = get_unaligned_be16(ptr);
105289- CONT;
105290- }
105291-
105292- return 0;
105293- LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
105294- off = IMM;
105295-load_byte:
105296- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
105297- if (likely(ptr != NULL)) {
105298- BPF_R0 = *(u8 *)ptr;
105299- CONT;
105300- }
105301-
105302- return 0;
105303- LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
105304- off = IMM + SRC;
105305- goto load_word;
105306- LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
105307- off = IMM + SRC;
105308- goto load_half;
105309- LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
105310- off = IMM + SRC;
105311- goto load_byte;
105312-
105313- default_label:
105314- /* If we ever reach this, we have a bug somewhere. */
105315- WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
105316- return 0;
105317-}
105318-
105319-/* Helper to find the offset of pkt_type in the sk_buff structure. We want
105320- * to make sure it's still a 3-bit field starting at a byte boundary;
105321- * taken from arch/x86/net/bpf_jit_comp.c.
105322- */
105323-#ifdef __BIG_ENDIAN_BITFIELD
105324-#define PKT_TYPE_MAX (7 << 5)
105325+ u32 A = 0; /* Accumulator */
105326+ u32 X = 0; /* Index Register */
105327+ u32 mem[BPF_MEMWORDS] = {}; /* Scratch Memory Store */
105328+ u32 tmp;
105329+ int k;
105330+
105331+ /*
105332+ * Process array of filter instructions.
105333+ */
105334+ for (;; fentry++) {
105335+#if defined(CONFIG_X86_32)
105336+#define K (fentry->k)
105337 #else
105338-#define PKT_TYPE_MAX 7
105339+ const u32 K = fentry->k;
105340 #endif
105341-static unsigned int pkt_type_offset(void)
105342-{
105343- struct sk_buff skb_probe = { .pkt_type = ~0, };
105344- u8 *ct = (u8 *) &skb_probe;
105345- unsigned int off;
105346
105347- for (off = 0; off < sizeof(struct sk_buff); off++) {
105348- if (ct[off] == PKT_TYPE_MAX)
105349- return off;
105350- }
105351-
105352- pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
105353- return -1;
105354-}
105355-
105356-static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105357-{
105358- return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
105359-}
105360-
105361-static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105362-{
105363- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
105364- struct nlattr *nla;
105365-
105366- if (skb_is_nonlinear(skb))
105367- return 0;
105368-
105369- if (skb->len < sizeof(struct nlattr))
105370- return 0;
105371-
105372- if (a > skb->len - sizeof(struct nlattr))
105373- return 0;
105374-
105375- nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
105376- if (nla)
105377- return (void *) nla - (void *) skb->data;
105378-
105379- return 0;
105380-}
105381-
105382-static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105383-{
105384- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
105385- struct nlattr *nla;
105386-
105387- if (skb_is_nonlinear(skb))
105388- return 0;
105389-
105390- if (skb->len < sizeof(struct nlattr))
105391- return 0;
105392-
105393- if (a > skb->len - sizeof(struct nlattr))
105394- return 0;
105395-
105396- nla = (struct nlattr *) &skb->data[a];
105397- if (nla->nla_len > skb->len - a)
105398- return 0;
105399-
105400- nla = nla_find_nested(nla, x);
105401- if (nla)
105402- return (void *) nla - (void *) skb->data;
105403-
105404- return 0;
105405-}
105406-
105407-static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105408-{
105409- return raw_smp_processor_id();
105410-}
105411-
105412-/* note that this only generates 32-bit random numbers */
105413-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105414-{
105415- return prandom_u32();
105416-}
105417-
105418-static bool convert_bpf_extensions(struct sock_filter *fp,
105419- struct sock_filter_int **insnp)
105420-{
105421- struct sock_filter_int *insn = *insnp;
105422-
105423- switch (fp->k) {
105424- case SKF_AD_OFF + SKF_AD_PROTOCOL:
105425- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
105426-
105427- /* A = *(u16 *) (CTX + offsetof(protocol)) */
105428- *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
105429- offsetof(struct sk_buff, protocol));
105430- /* A = ntohs(A) [emitting a nop or swap16] */
105431- *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
105432- break;
105433-
105434- case SKF_AD_OFF + SKF_AD_PKTTYPE:
105435- *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
105436- pkt_type_offset());
105437- if (insn->off < 0)
105438- return false;
105439- insn++;
105440- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
105441-#ifdef __BIG_ENDIAN_BITFIELD
105442- insn++;
105443- *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
105444-#endif
105445- break;
105446-
105447- case SKF_AD_OFF + SKF_AD_IFINDEX:
105448- case SKF_AD_OFF + SKF_AD_HATYPE:
105449- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
105450- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
105451- BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
105452-
105453- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
105454- BPF_REG_TMP, BPF_REG_CTX,
105455- offsetof(struct sk_buff, dev));
105456- /* if (tmp != 0) goto pc + 1 */
105457- *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
105458- *insn++ = BPF_EXIT_INSN();
105459- if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
105460- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
105461- offsetof(struct net_device, ifindex));
105462- else
105463- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
105464- offsetof(struct net_device, type));
105465- break;
105466-
105467- case SKF_AD_OFF + SKF_AD_MARK:
105468- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
105469-
105470- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
105471- offsetof(struct sk_buff, mark));
105472- break;
105473-
105474- case SKF_AD_OFF + SKF_AD_RXHASH:
105475- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
105476-
105477- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
105478- offsetof(struct sk_buff, hash));
105479- break;
105480-
105481- case SKF_AD_OFF + SKF_AD_QUEUE:
105482- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
105483-
105484- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
105485- offsetof(struct sk_buff, queue_mapping));
105486- break;
105487-
105488- case SKF_AD_OFF + SKF_AD_VLAN_TAG:
105489- case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
105490- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
105491- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
105492-
105493- /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
105494- *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
105495- offsetof(struct sk_buff, vlan_tci));
105496- if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
105497- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
105498- ~VLAN_TAG_PRESENT);
105499- } else {
105500- /* A >>= 12 */
105501- *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
105502- /* A &= 1 */
105503- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
105504- }
105505- break;
105506-
105507- case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
105508- case SKF_AD_OFF + SKF_AD_NLATTR:
105509- case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
105510- case SKF_AD_OFF + SKF_AD_CPU:
105511- case SKF_AD_OFF + SKF_AD_RANDOM:
105512- /* arg1 = CTX */
105513- *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
105514- /* arg2 = A */
105515- *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
105516- /* arg3 = X */
105517- *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
105518- /* Emit call(arg1=CTX, arg2=A, arg3=X) */
105519- switch (fp->k) {
105520- case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
105521- *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
105522- break;
105523- case SKF_AD_OFF + SKF_AD_NLATTR:
105524- *insn = BPF_EMIT_CALL(__skb_get_nlattr);
105525- break;
105526- case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
105527- *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
105528- break;
105529- case SKF_AD_OFF + SKF_AD_CPU:
105530- *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
105531- break;
105532- case SKF_AD_OFF + SKF_AD_RANDOM:
105533- *insn = BPF_EMIT_CALL(__get_random_u32);
105534- break;
105535- }
105536- break;
105537-
105538- case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
105539- /* A ^= X */
105540- *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
105541- break;
105542-
105543- default:
105544- /* This is just a dummy call to avoid letting the compiler
105545- * evict __bpf_call_base() as an optimization. Placed here
105546- * where no-one bothers.
105547- */
105548- BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
105549- return false;
105550- }
105551-
105552- *insnp = insn;
105553- return true;
105554-}
105555-
105556-/**
105557- * sk_convert_filter - convert filter program
105558- * @prog: the user passed filter program
105559- * @len: the length of the user passed filter program
105560- * @new_prog: buffer where converted program will be stored
105561- * @new_len: pointer to store length of converted program
105562- *
105563- * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
105564- * Conversion workflow:
105565- *
105566- * 1) First pass for calculating the new program length:
105567- * sk_convert_filter(old_prog, old_len, NULL, &new_len)
105568- *
105569- * 2) 2nd pass to remap in two passes: 1st pass finds new
105570- * jump offsets, 2nd pass remapping:
105571- * new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
105572- * sk_convert_filter(old_prog, old_len, new_prog, &new_len);
105573- *
105574- * User BPF's register A is mapped to our BPF register 6, user BPF
105575- * register X is mapped to BPF register 7; frame pointer is always
105576- * register 10; Context 'void *ctx' is stored in register 1, that is,
105577- * for socket filters: ctx == 'struct sk_buff *', for seccomp:
105578- * ctx == 'struct seccomp_data *'.
105579- */
105580-int sk_convert_filter(struct sock_filter *prog, int len,
105581- struct sock_filter_int *new_prog, int *new_len)
105582-{
105583- int new_flen = 0, pass = 0, target, i;
105584- struct sock_filter_int *new_insn;
105585- struct sock_filter *fp;
105586- int *addrs = NULL;
105587- u8 bpf_src;
105588-
105589- BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
105590- BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
105591-
105592- if (len <= 0 || len > BPF_MAXINSNS)
105593- return -EINVAL;
105594-
105595- if (new_prog) {
105596- addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
105597- if (!addrs)
105598- return -ENOMEM;
105599- }
105600-
105601-do_pass:
105602- new_insn = new_prog;
105603- fp = prog;
105604-
105605- if (new_insn)
105606- *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
105607- new_insn++;
105608-
105609- for (i = 0; i < len; fp++, i++) {
105610- struct sock_filter_int tmp_insns[6] = { };
105611- struct sock_filter_int *insn = tmp_insns;
105612-
105613- if (addrs)
105614- addrs[i] = new_insn - new_prog;
105615-
105616- switch (fp->code) {
105617- /* All arithmetic insns and skb loads map as-is. */
105618- case BPF_ALU | BPF_ADD | BPF_X:
105619- case BPF_ALU | BPF_ADD | BPF_K:
105620- case BPF_ALU | BPF_SUB | BPF_X:
105621- case BPF_ALU | BPF_SUB | BPF_K:
105622- case BPF_ALU | BPF_AND | BPF_X:
105623- case BPF_ALU | BPF_AND | BPF_K:
105624- case BPF_ALU | BPF_OR | BPF_X:
105625- case BPF_ALU | BPF_OR | BPF_K:
105626- case BPF_ALU | BPF_LSH | BPF_X:
105627- case BPF_ALU | BPF_LSH | BPF_K:
105628- case BPF_ALU | BPF_RSH | BPF_X:
105629- case BPF_ALU | BPF_RSH | BPF_K:
105630- case BPF_ALU | BPF_XOR | BPF_X:
105631- case BPF_ALU | BPF_XOR | BPF_K:
105632- case BPF_ALU | BPF_MUL | BPF_X:
105633- case BPF_ALU | BPF_MUL | BPF_K:
105634- case BPF_ALU | BPF_DIV | BPF_X:
105635- case BPF_ALU | BPF_DIV | BPF_K:
105636- case BPF_ALU | BPF_MOD | BPF_X:
105637- case BPF_ALU | BPF_MOD | BPF_K:
105638- case BPF_ALU | BPF_NEG:
105639- case BPF_LD | BPF_ABS | BPF_W:
105640- case BPF_LD | BPF_ABS | BPF_H:
105641- case BPF_LD | BPF_ABS | BPF_B:
105642- case BPF_LD | BPF_IND | BPF_W:
105643- case BPF_LD | BPF_IND | BPF_H:
105644- case BPF_LD | BPF_IND | BPF_B:
105645- /* Check for overloaded BPF extension and
105646- * directly convert it if found, otherwise
105647- * just move on with mapping.
105648- */
105649- if (BPF_CLASS(fp->code) == BPF_LD &&
105650- BPF_MODE(fp->code) == BPF_ABS &&
105651- convert_bpf_extensions(fp, &insn))
105652- break;
105653-
105654- *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
105655- break;
105656-
105657- /* Jump transformation cannot use BPF block macros
105658- * everywhere as offset calculation and target updates
105659- * require a bit more work than the rest, i.e. jump
105660- * opcodes map as-is, but offsets need adjustment.
105661- */
105662-
105663-#define BPF_EMIT_JMP \
105664- do { \
105665- if (target >= len || target < 0) \
105666- goto err; \
105667- insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
105668- /* Adjust pc relative offset for 2nd or 3rd insn. */ \
105669- insn->off -= insn - tmp_insns; \
105670- } while (0)
105671-
105672- case BPF_JMP | BPF_JA:
105673- target = i + fp->k + 1;
105674- insn->code = fp->code;
105675- BPF_EMIT_JMP;
105676- break;
105677-
105678- case BPF_JMP | BPF_JEQ | BPF_K:
105679- case BPF_JMP | BPF_JEQ | BPF_X:
105680- case BPF_JMP | BPF_JSET | BPF_K:
105681- case BPF_JMP | BPF_JSET | BPF_X:
105682- case BPF_JMP | BPF_JGT | BPF_K:
105683- case BPF_JMP | BPF_JGT | BPF_X:
105684- case BPF_JMP | BPF_JGE | BPF_K:
105685- case BPF_JMP | BPF_JGE | BPF_X:
105686- if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
105687- /* BPF immediates are signed, zero extend
105688- * immediate into tmp register and use it
105689- * in compare insn.
105690- */
105691- *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
105692-
105693- insn->dst_reg = BPF_REG_A;
105694- insn->src_reg = BPF_REG_TMP;
105695- bpf_src = BPF_X;
105696- } else {
105697- insn->dst_reg = BPF_REG_A;
105698- insn->src_reg = BPF_REG_X;
105699- insn->imm = fp->k;
105700- bpf_src = BPF_SRC(fp->code);
105701+ switch (fentry->code) {
105702+ case BPF_S_ALU_ADD_X:
105703+ A += X;
105704+ continue;
105705+ case BPF_S_ALU_ADD_K:
105706+ A += K;
105707+ continue;
105708+ case BPF_S_ALU_SUB_X:
105709+ A -= X;
105710+ continue;
105711+ case BPF_S_ALU_SUB_K:
105712+ A -= K;
105713+ continue;
105714+ case BPF_S_ALU_MUL_X:
105715+ A *= X;
105716+ continue;
105717+ case BPF_S_ALU_MUL_K:
105718+ A *= K;
105719+ continue;
105720+ case BPF_S_ALU_DIV_X:
105721+ if (X == 0)
105722+ return 0;
105723+ A /= X;
105724+ continue;
105725+ case BPF_S_ALU_DIV_K:
105726+ A /= K;
105727+ continue;
105728+ case BPF_S_ALU_MOD_X:
105729+ if (X == 0)
105730+ return 0;
105731+ A %= X;
105732+ continue;
105733+ case BPF_S_ALU_MOD_K:
105734+ A %= K;
105735+ continue;
105736+ case BPF_S_ALU_AND_X:
105737+ A &= X;
105738+ continue;
105739+ case BPF_S_ALU_AND_K:
105740+ A &= K;
105741+ continue;
105742+ case BPF_S_ALU_OR_X:
105743+ A |= X;
105744+ continue;
105745+ case BPF_S_ALU_OR_K:
105746+ A |= K;
105747+ continue;
105748+ case BPF_S_ANC_ALU_XOR_X:
105749+ case BPF_S_ALU_XOR_X:
105750+ A ^= X;
105751+ continue;
105752+ case BPF_S_ALU_XOR_K:
105753+ A ^= K;
105754+ continue;
105755+ case BPF_S_ALU_LSH_X:
105756+ A <<= X;
105757+ continue;
105758+ case BPF_S_ALU_LSH_K:
105759+ A <<= K;
105760+ continue;
105761+ case BPF_S_ALU_RSH_X:
105762+ A >>= X;
105763+ continue;
105764+ case BPF_S_ALU_RSH_K:
105765+ A >>= K;
105766+ continue;
105767+ case BPF_S_ALU_NEG:
105768+ A = -A;
105769+ continue;
105770+ case BPF_S_JMP_JA:
105771+ fentry += K;
105772+ continue;
105773+ case BPF_S_JMP_JGT_K:
105774+ fentry += (A > K) ? fentry->jt : fentry->jf;
105775+ continue;
105776+ case BPF_S_JMP_JGE_K:
105777+ fentry += (A >= K) ? fentry->jt : fentry->jf;
105778+ continue;
105779+ case BPF_S_JMP_JEQ_K:
105780+ fentry += (A == K) ? fentry->jt : fentry->jf;
105781+ continue;
105782+ case BPF_S_JMP_JSET_K:
105783+ fentry += (A & K) ? fentry->jt : fentry->jf;
105784+ continue;
105785+ case BPF_S_JMP_JGT_X:
105786+ fentry += (A > X) ? fentry->jt : fentry->jf;
105787+ continue;
105788+ case BPF_S_JMP_JGE_X:
105789+ fentry += (A >= X) ? fentry->jt : fentry->jf;
105790+ continue;
105791+ case BPF_S_JMP_JEQ_X:
105792+ fentry += (A == X) ? fentry->jt : fentry->jf;
105793+ continue;
105794+ case BPF_S_JMP_JSET_X:
105795+ fentry += (A & X) ? fentry->jt : fentry->jf;
105796+ continue;
105797+ case BPF_S_LD_W_ABS:
105798+ k = K;
105799+load_w:
105800+ ptr = load_pointer(skb, k, 4, &tmp);
105801+ if (ptr != NULL) {
105802+ A = get_unaligned_be32(ptr);
105803+ continue;
105804 }
105805-
105806- /* Common case where 'jump_false' is next insn. */
105807- if (fp->jf == 0) {
105808- insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
105809- target = i + fp->jt + 1;
105810- BPF_EMIT_JMP;
105811- break;
105812+ return 0;
105813+ case BPF_S_LD_H_ABS:
105814+ k = K;
105815+load_h:
105816+ ptr = load_pointer(skb, k, 2, &tmp);
105817+ if (ptr != NULL) {
105818+ A = get_unaligned_be16(ptr);
105819+ continue;
105820 }
105821-
105822- /* Convert JEQ into JNE when 'jump_true' is next insn. */
105823- if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
105824- insn->code = BPF_JMP | BPF_JNE | bpf_src;
105825- target = i + fp->jf + 1;
105826- BPF_EMIT_JMP;
105827- break;
105828+ return 0;
105829+ case BPF_S_LD_B_ABS:
105830+ k = K;
105831+load_b:
105832+ ptr = load_pointer(skb, k, 1, &tmp);
105833+ if (ptr != NULL) {
105834+ A = *(u8 *)ptr;
105835+ continue;
105836 }
105837-
105838- /* Other jumps are mapped into two insns: Jxx and JA. */
105839- target = i + fp->jt + 1;
105840- insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
105841- BPF_EMIT_JMP;
105842- insn++;
105843-
105844- insn->code = BPF_JMP | BPF_JA;
105845- target = i + fp->jf + 1;
105846- BPF_EMIT_JMP;
105847- break;
105848-
105849- /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
105850- case BPF_LDX | BPF_MSH | BPF_B:
105851- /* tmp = A */
105852- *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
105853- /* A = BPF_R0 = *(u8 *) (skb->data + K) */
105854- *insn++ = BPF_LD_ABS(BPF_B, fp->k);
105855- /* A &= 0xf */
105856- *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
105857- /* A <<= 2 */
105858- *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
105859- /* X = A */
105860- *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
105861- /* A = tmp */
105862- *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
105863- break;
105864-
105865- /* RET_K, RET_A are remapped into 2 insns. */
105866- case BPF_RET | BPF_A:
105867- case BPF_RET | BPF_K:
105868- *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
105869- BPF_K : BPF_X, BPF_REG_0,
105870- BPF_REG_A, fp->k);
105871- *insn = BPF_EXIT_INSN();
105872- break;
105873-
105874- /* Store to stack. */
105875- case BPF_ST:
105876- case BPF_STX:
105877- *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
105878- BPF_ST ? BPF_REG_A : BPF_REG_X,
105879- -(BPF_MEMWORDS - fp->k) * 4);
105880- break;
105881-
105882- /* Load from stack. */
105883- case BPF_LD | BPF_MEM:
105884- case BPF_LDX | BPF_MEM:
105885- *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
105886- BPF_REG_A : BPF_REG_X, BPF_REG_FP,
105887- -(BPF_MEMWORDS - fp->k) * 4);
105888- break;
105889-
105890- /* A = K or X = K */
105891- case BPF_LD | BPF_IMM:
105892- case BPF_LDX | BPF_IMM:
105893- *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
105894- BPF_REG_A : BPF_REG_X, fp->k);
105895- break;
105896-
105897- /* X = A */
105898- case BPF_MISC | BPF_TAX:
105899- *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
105900- break;
105901-
105902- /* A = X */
105903- case BPF_MISC | BPF_TXA:
105904- *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
105905- break;
105906-
105907- /* A = skb->len or X = skb->len */
105908- case BPF_LD | BPF_W | BPF_LEN:
105909- case BPF_LDX | BPF_W | BPF_LEN:
105910- *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
105911- BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
105912- offsetof(struct sk_buff, len));
105913- break;
105914-
105915- /* Access seccomp_data fields. */
105916- case BPF_LDX | BPF_ABS | BPF_W:
105917- /* A = *(u32 *) (ctx + K) */
105918- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
105919- break;
105920-
105921- /* Unknown instruction. */
105922+ return 0;
105923+ case BPF_S_LD_W_LEN:
105924+ A = skb->len;
105925+ continue;
105926+ case BPF_S_LDX_W_LEN:
105927+ X = skb->len;
105928+ continue;
105929+ case BPF_S_LD_W_IND:
105930+ k = X + K;
105931+ goto load_w;
105932+ case BPF_S_LD_H_IND:
105933+ k = X + K;
105934+ goto load_h;
105935+ case BPF_S_LD_B_IND:
105936+ k = X + K;
105937+ goto load_b;
105938+ case BPF_S_LDX_B_MSH:
105939+ ptr = load_pointer(skb, K, 1, &tmp);
105940+ if (ptr != NULL) {
105941+ X = (*(u8 *)ptr & 0xf) << 2;
105942+ continue;
105943+ }
105944+ return 0;
105945+ case BPF_S_LD_IMM:
105946+ A = K;
105947+ continue;
105948+ case BPF_S_LDX_IMM:
105949+ X = K;
105950+ continue;
105951+ case BPF_S_LD_MEM:
105952+ A = mem[K&15];
105953+ continue;
105954+ case BPF_S_LDX_MEM:
105955+ X = mem[K&15];
105956+ continue;
105957+ case BPF_S_MISC_TAX:
105958+ X = A;
105959+ continue;
105960+ case BPF_S_MISC_TXA:
105961+ A = X;
105962+ continue;
105963+ case BPF_S_RET_K:
105964+ return K;
105965+ case BPF_S_RET_A:
105966+ return A;
105967+ case BPF_S_ST:
105968+ mem[K&15] = A;
105969+ continue;
105970+ case BPF_S_STX:
105971+ mem[K&15] = X;
105972+ continue;
105973+ case BPF_S_ANC_PROTOCOL:
105974+ A = ntohs(skb->protocol);
105975+ continue;
105976+ case BPF_S_ANC_PKTTYPE:
105977+ A = skb->pkt_type;
105978+ continue;
105979+ case BPF_S_ANC_IFINDEX:
105980+ if (!skb->dev)
105981+ return 0;
105982+ A = skb->dev->ifindex;
105983+ continue;
105984+ case BPF_S_ANC_MARK:
105985+ A = skb->mark;
105986+ continue;
105987+ case BPF_S_ANC_QUEUE:
105988+ A = skb->queue_mapping;
105989+ continue;
105990+ case BPF_S_ANC_HATYPE:
105991+ if (!skb->dev)
105992+ return 0;
105993+ A = skb->dev->type;
105994+ continue;
105995+ case BPF_S_ANC_RXHASH:
105996+ A = skb->hash;
105997+ continue;
105998+ case BPF_S_ANC_CPU:
105999+ A = raw_smp_processor_id();
106000+ continue;
106001+ case BPF_S_ANC_VLAN_TAG:
106002+ A = vlan_tx_tag_get(skb);
106003+ continue;
106004+ case BPF_S_ANC_VLAN_TAG_PRESENT:
106005+ A = !!vlan_tx_tag_present(skb);
106006+ continue;
106007+ case BPF_S_ANC_PAY_OFFSET:
106008+ A = __skb_get_poff(skb);
106009+ continue;
106010+ case BPF_S_ANC_NLATTR: {
106011+ struct nlattr *nla;
106012+
106013+ if (skb_is_nonlinear(skb))
106014+ return 0;
106015+ if (skb->len < sizeof(struct nlattr))
106016+ return 0;
106017+ if (A > skb->len - sizeof(struct nlattr))
106018+ return 0;
106019+
106020+ nla = nla_find((struct nlattr *)&skb->data[A],
106021+ skb->len - A, X);
106022+ if (nla)
106023+ A = (void *)nla - (void *)skb->data;
106024+ else
106025+ A = 0;
106026+ continue;
106027+ }
106028+ case BPF_S_ANC_NLATTR_NEST: {
106029+ struct nlattr *nla;
106030+
106031+ if (skb_is_nonlinear(skb))
106032+ return 0;
106033+ if (skb->len < sizeof(struct nlattr))
106034+ return 0;
106035+ if (A > skb->len - sizeof(struct nlattr))
106036+ return 0;
106037+
106038+ nla = (struct nlattr *)&skb->data[A];
106039+ if (nla->nla_len > skb->len - A)
106040+ return 0;
106041+
106042+ nla = nla_find_nested(nla, X);
106043+ if (nla)
106044+ A = (void *)nla - (void *)skb->data;
106045+ else
106046+ A = 0;
106047+ continue;
106048+ }
106049+#ifdef CONFIG_SECCOMP_FILTER
106050+ case BPF_S_ANC_SECCOMP_LD_W:
106051+ A = seccomp_bpf_load(fentry->k);
106052+ continue;
106053+#endif
106054 default:
106055- goto err;
106056+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u jf:%u k:%u\n",
106057+ fentry->code, fentry->jt,
106058+ fentry->jf, fentry->k);
106059+ BUG();
106060+ return 0;
106061 }
106062-
106063- insn++;
106064- if (new_prog)
106065- memcpy(new_insn, tmp_insns,
106066- sizeof(*insn) * (insn - tmp_insns));
106067- new_insn += insn - tmp_insns;
106068- }
106069-
106070- if (!new_prog) {
106071- /* Only calculating new length. */
106072- *new_len = new_insn - new_prog;
106073- return 0;
106074- }
106075-
106076- pass++;
106077- if (new_flen != new_insn - new_prog) {
106078- new_flen = new_insn - new_prog;
106079- if (pass > 2)
106080- goto err;
106081- goto do_pass;
106082 }
106083
106084- kfree(addrs);
106085- BUG_ON(*new_len != new_flen);
106086 return 0;
106087-err:
106088- kfree(addrs);
106089- return -EINVAL;
106090 }
106091+EXPORT_SYMBOL(sk_run_filter);
106092
106093-/* Security:
106094- *
106095+/*
106096+ * Security:
106097 * A BPF program is able to use 16 cells of memory to store intermediate
106098- * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
106099- *
106100+ * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter())
106101 * As we don't want to clear the mem[] array for each packet going through
106102 * sk_run_filter(), we check that a filter loaded by user space never tries to read
106103 * a cell that was not previously written, and we check all branches to be sure
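
The large hunk above reverts 3.16's internal eBPF engine (the jumptable-driven __sk_run_filter() with its 64-bit register file, plus the sk_convert_filter() translation pass) back to the classic interpreter: one 32-bit accumulator A, one index register X, sixteen scratch words, and a single switch over pre-decoded BPF_S_* opcodes, with conditional jumps advancing fentry by jt or jf. grsecurity dropped the in-kernel translation of unprivileged filters at the time as unwanted attack surface. The control structure fits in a few lines; a compilable miniature with a reduced opcode set:

    #include <stdint.h>
    #include <stdio.h>

    /* Miniature classic-BPF interpreter: accumulator machine, relative
     * conditional jumps, RET terminates. Reduced opcode set for clarity. */
    enum { OP_LD_IMM, OP_ADD_K, OP_JGT_K, OP_RET_A, OP_RET_K };

    struct insn { uint8_t code, jt, jf; uint32_t k; };

    static uint32_t run_filter(const struct insn *f)
    {
        uint32_t A = 0;

        for (;; f++) {
            switch (f->code) {
            case OP_LD_IMM: A = f->k;  continue;
            case OP_ADD_K:  A += f->k; continue;
            case OP_JGT_K:    /* skip jt insns if true, else jf */
                f += (A > f->k) ? f->jt : f->jf;
                continue;
            case OP_RET_A:  return A;
            case OP_RET_K:  return f->k;
            }
            return 0;    /* unknown opcode */
        }
    }

    int main(void)
    {
        /* Accept (return 42) when 40 + 3 > 42, else drop (return 0). */
        const struct insn prog[] = {
            { OP_LD_IMM, 0, 0, 40 },
            { OP_ADD_K,  0, 0, 3  },
            { OP_JGT_K,  0, 1, 42 },
            { OP_RET_K,  0, 0, 42 },
            { OP_RET_K,  0, 0, 0  },
        };

        printf("%u\n", run_filter(prog));    /* prints 42 */
        return 0;
    }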
106104@@ -1096,46 +418,44 @@ err:
106105 */
106106 static int check_load_and_stores(struct sock_filter *filter, int flen)
106107 {
106108- u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
106109+ u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
106110 int pc, ret = 0;
106111
106112- BUILD_BUG_ON(BPF_MEMWORDS > 16);
106113-
106114- masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
106115+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
106116+ masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
106117 if (!masks)
106118 return -ENOMEM;
106119-
106120 memset(masks, 0xff, flen * sizeof(*masks));
106121
106122 for (pc = 0; pc < flen; pc++) {
106123 memvalid &= masks[pc];
106124
106125 switch (filter[pc].code) {
106126- case BPF_ST:
106127- case BPF_STX:
106128+ case BPF_S_ST:
106129+ case BPF_S_STX:
106130 memvalid |= (1 << filter[pc].k);
106131 break;
106132- case BPF_LD | BPF_MEM:
106133- case BPF_LDX | BPF_MEM:
106134+ case BPF_S_LD_MEM:
106135+ case BPF_S_LDX_MEM:
106136 if (!(memvalid & (1 << filter[pc].k))) {
106137 ret = -EINVAL;
106138 goto error;
106139 }
106140 break;
106141- case BPF_JMP | BPF_JA:
106142- /* A jump must set masks on target */
106143+ case BPF_S_JMP_JA:
106144+ /* a jump must set masks on target */
106145 masks[pc + 1 + filter[pc].k] &= memvalid;
106146 memvalid = ~0;
106147 break;
106148- case BPF_JMP | BPF_JEQ | BPF_K:
106149- case BPF_JMP | BPF_JEQ | BPF_X:
106150- case BPF_JMP | BPF_JGE | BPF_K:
106151- case BPF_JMP | BPF_JGE | BPF_X:
106152- case BPF_JMP | BPF_JGT | BPF_K:
106153- case BPF_JMP | BPF_JGT | BPF_X:
106154- case BPF_JMP | BPF_JSET | BPF_K:
106155- case BPF_JMP | BPF_JSET | BPF_X:
106156- /* A jump must set masks on targets */
106157+ case BPF_S_JMP_JEQ_K:
106158+ case BPF_S_JMP_JEQ_X:
106159+ case BPF_S_JMP_JGE_K:
106160+ case BPF_S_JMP_JGE_X:
106161+ case BPF_S_JMP_JGT_K:
106162+ case BPF_S_JMP_JGT_X:
106163+ case BPF_S_JMP_JSET_X:
106164+ case BPF_S_JMP_JSET_K:
106165+ /* a jump must set masks on targets */
106166 masks[pc + 1 + filter[pc].jt] &= memvalid;
106167 masks[pc + 1 + filter[pc].jf] &= memvalid;
106168 memvalid = ~0;
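
check_load_and_stores(), restored above in its pre-3.16 spelling, is a small forward dataflow pass: memvalid keeps one validity bit per scratch cell, masks[pc] records which cells may be assumed written when pc is reached through a jump, and any LD/LDX from a cell no prior ST/STX has written is rejected. A compilable miniature over three instruction kinds (jump-target bounds checking, done elsewhere in sk_chk_filter(), is assumed):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum { F_ST, F_LD, F_JA };    /* store, load, unconditional jump */
    struct insn { uint8_t code; uint32_t k; };

    static int check(const struct insn *f, int flen)
    {
        uint16_t *masks = malloc(flen * sizeof(*masks));
        uint16_t memvalid = 0;    /* one bit per scratch cell */
        int pc, ret = 0;

        if (!masks)
            return -1;
        memset(masks, 0xff, flen * sizeof(*masks));
        for (pc = 0; pc < flen; pc++) {
            memvalid &= masks[pc];    /* meet over incoming jump edges */
            switch (f[pc].code) {
            case F_ST:
                memvalid |= 1u << (f[pc].k & 15);
                break;
            case F_LD:
                if (!(memvalid & (1u << (f[pc].k & 15)))) {
                    ret = -1;    /* read of unwritten cell */
                    goto out;
                }
                break;
            case F_JA:    /* target keeps only currently-valid bits */
                masks[pc + 1 + f[pc].k] &= memvalid;
                memvalid = 0xffff;
                break;
            }
        }
    out:
        free(masks);
        return ret;
    }

    int main(void)
    {
        const struct insn bad[] = { { F_JA, 1 }, { F_ST, 0 }, { F_LD, 0 } };

        printf("%d\n", check(bad, 3));    /* -1: the jump skips the store */
        return 0;
    }

The pass scans instructions linearly; jumps only constrain what their targets may assume, which is why the example is rejected even though the fall-through path stores before loading.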
106169@@ -1147,72 +467,6 @@ error:
106170 return ret;
106171 }
106172
106173-static bool chk_code_allowed(u16 code_to_probe)
106174-{
106175- static const bool codes[] = {
106176- /* 32 bit ALU operations */
106177- [BPF_ALU | BPF_ADD | BPF_K] = true,
106178- [BPF_ALU | BPF_ADD | BPF_X] = true,
106179- [BPF_ALU | BPF_SUB | BPF_K] = true,
106180- [BPF_ALU | BPF_SUB | BPF_X] = true,
106181- [BPF_ALU | BPF_MUL | BPF_K] = true,
106182- [BPF_ALU | BPF_MUL | BPF_X] = true,
106183- [BPF_ALU | BPF_DIV | BPF_K] = true,
106184- [BPF_ALU | BPF_DIV | BPF_X] = true,
106185- [BPF_ALU | BPF_MOD | BPF_K] = true,
106186- [BPF_ALU | BPF_MOD | BPF_X] = true,
106187- [BPF_ALU | BPF_AND | BPF_K] = true,
106188- [BPF_ALU | BPF_AND | BPF_X] = true,
106189- [BPF_ALU | BPF_OR | BPF_K] = true,
106190- [BPF_ALU | BPF_OR | BPF_X] = true,
106191- [BPF_ALU | BPF_XOR | BPF_K] = true,
106192- [BPF_ALU | BPF_XOR | BPF_X] = true,
106193- [BPF_ALU | BPF_LSH | BPF_K] = true,
106194- [BPF_ALU | BPF_LSH | BPF_X] = true,
106195- [BPF_ALU | BPF_RSH | BPF_K] = true,
106196- [BPF_ALU | BPF_RSH | BPF_X] = true,
106197- [BPF_ALU | BPF_NEG] = true,
106198- /* Load instructions */
106199- [BPF_LD | BPF_W | BPF_ABS] = true,
106200- [BPF_LD | BPF_H | BPF_ABS] = true,
106201- [BPF_LD | BPF_B | BPF_ABS] = true,
106202- [BPF_LD | BPF_W | BPF_LEN] = true,
106203- [BPF_LD | BPF_W | BPF_IND] = true,
106204- [BPF_LD | BPF_H | BPF_IND] = true,
106205- [BPF_LD | BPF_B | BPF_IND] = true,
106206- [BPF_LD | BPF_IMM] = true,
106207- [BPF_LD | BPF_MEM] = true,
106208- [BPF_LDX | BPF_W | BPF_LEN] = true,
106209- [BPF_LDX | BPF_B | BPF_MSH] = true,
106210- [BPF_LDX | BPF_IMM] = true,
106211- [BPF_LDX | BPF_MEM] = true,
106212- /* Store instructions */
106213- [BPF_ST] = true,
106214- [BPF_STX] = true,
106215- /* Misc instructions */
106216- [BPF_MISC | BPF_TAX] = true,
106217- [BPF_MISC | BPF_TXA] = true,
106218- /* Return instructions */
106219- [BPF_RET | BPF_K] = true,
106220- [BPF_RET | BPF_A] = true,
106221- /* Jump instructions */
106222- [BPF_JMP | BPF_JA] = true,
106223- [BPF_JMP | BPF_JEQ | BPF_K] = true,
106224- [BPF_JMP | BPF_JEQ | BPF_X] = true,
106225- [BPF_JMP | BPF_JGE | BPF_K] = true,
106226- [BPF_JMP | BPF_JGE | BPF_X] = true,
106227- [BPF_JMP | BPF_JGT | BPF_K] = true,
106228- [BPF_JMP | BPF_JGT | BPF_X] = true,
106229- [BPF_JMP | BPF_JSET | BPF_K] = true,
106230- [BPF_JMP | BPF_JSET | BPF_X] = true,
106231- };
106232-
106233- if (code_to_probe >= ARRAY_SIZE(codes))
106234- return false;
106235-
106236- return codes[code_to_probe];
106237-}
106238-
106239 /**
106240 * sk_chk_filter - verify socket filter code
106241 * @filter: filter to verify
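
In the hunk that follows, validation and decoding are fused into one pass: the raw 16-bit opcode indexes the codes[] table, a zero entry (the array's implicit default) rejects unknown instructions, and a non-zero entry is the decoded BPF_S_* value that sk_run_filter() later switches on directly; the ANCILLARY() macro additionally remaps the magic SKF_AD_OFF load offsets onto BPF_S_ANC_* opcodes. A miniature of the table-driven decode:

    #include <stdint.h>
    #include <stdio.h>

    /* Raw opcodes index a lookup table whose entries are either 0
     * (invalid) or the decoded internal opcode. Values are illustrative. */
    enum { S_INVALID = 0, S_ALU_ADD_K, S_RET_K };

    #define RAW_ALU_ADD_K 0x04    /* BPF_ALU|BPF_ADD|BPF_K */
    #define RAW_RET_K     0x06    /* BPF_RET|BPF_K */

    static const uint8_t codes[256] = {
        [RAW_ALU_ADD_K] = S_ALU_ADD_K,
        [RAW_RET_K]     = S_RET_K,
    };

    static int decode(uint16_t raw)
    {
        if (raw >= sizeof(codes))              /* outside the table */
            return -1;
        return codes[raw] ? codes[raw] : -1;   /* 0 means invalid */
    }

    int main(void)
    {
        printf("%d %d %d\n", decode(RAW_ALU_ADD_K), decode(RAW_RET_K),
               decode(0x99));                  /* prints: 1 2 -1 */
        return 0;
    }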
106242@@ -1229,303 +483,187 @@ static bool chk_code_allowed(u16 code_to_probe)
106243 */
106244 int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
106245 {
106246- bool anc_found;
106247+ /*
106248+ * Valid instructions are initialized to non-0.
106249+ * Invalid instructions are initialized to 0.
106250+ */
106251+ static const u8 codes[] = {
106252+ [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
106253+ [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
106254+ [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
106255+ [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
106256+ [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
106257+ [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
106258+ [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
106259+ [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
106260+ [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
106261+ [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
106262+ [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
106263+ [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
106264+ [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
106265+ [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
106266+ [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
106267+ [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
106268+ [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
106269+ [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
106270+ [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
106271+ [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
106272+ [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
106273+ [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
106274+ [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
106275+ [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
106276+ [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
106277+ [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
106278+ [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
106279+ [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
106280+ [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
106281+ [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
106282+ [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
106283+ [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
106284+ [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
106285+ [BPF_RET|BPF_K] = BPF_S_RET_K,
106286+ [BPF_RET|BPF_A] = BPF_S_RET_A,
106287+ [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
106288+ [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
106289+ [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
106290+ [BPF_ST] = BPF_S_ST,
106291+ [BPF_STX] = BPF_S_STX,
106292+ [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
106293+ [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
106294+ [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
106295+ [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
106296+ [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
106297+ [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
106298+ [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
106299+ [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
106300+ [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
106301+ };
106302 int pc;
106303+ bool anc_found;
106304
106305 if (flen == 0 || flen > BPF_MAXINSNS)
106306 return -EINVAL;
106307
106308- /* Check the filter code now */
106309+ /* check the filter code now */
106310 for (pc = 0; pc < flen; pc++) {
106311 struct sock_filter *ftest = &filter[pc];
106312+ u16 code = ftest->code;
106313
106314- /* May we actually operate on this code? */
106315- if (!chk_code_allowed(ftest->code))
106316+ if (code >= ARRAY_SIZE(codes))
106317+ return -EINVAL;
106318+ code = codes[code];
106319+ if (!code)
106320 return -EINVAL;
106321-
106322 /* Some instructions need special checks */
106323- switch (ftest->code) {
106324- case BPF_ALU | BPF_DIV | BPF_K:
106325- case BPF_ALU | BPF_MOD | BPF_K:
106326- /* Check for division by zero */
106327+ switch (code) {
106328+ case BPF_S_ALU_DIV_K:
106329+ case BPF_S_ALU_MOD_K:
106330+ /* check for division by zero */
106331 if (ftest->k == 0)
106332 return -EINVAL;
106333 break;
106334- case BPF_LD | BPF_MEM:
106335- case BPF_LDX | BPF_MEM:
106336- case BPF_ST:
106337- case BPF_STX:
106338- /* Check for invalid memory addresses */
106339+ case BPF_S_LD_MEM:
106340+ case BPF_S_LDX_MEM:
106341+ case BPF_S_ST:
106342+ case BPF_S_STX:
106343+ /* check for invalid memory addresses */
106344 if (ftest->k >= BPF_MEMWORDS)
106345 return -EINVAL;
106346 break;
106347- case BPF_JMP | BPF_JA:
106348- /* Note, the large ftest->k might cause loops.
106349+ case BPF_S_JMP_JA:
106350+ /*
106351+ * Note, the large ftest->k might cause loops.
106352 * Compare this with conditional jumps below,
106353 * where offsets are limited. --ANK (981016)
106354 */
106355- if (ftest->k >= (unsigned int)(flen - pc - 1))
106356+ if (ftest->k >= (unsigned int)(flen-pc-1))
106357 return -EINVAL;
106358 break;
106359- case BPF_JMP | BPF_JEQ | BPF_K:
106360- case BPF_JMP | BPF_JEQ | BPF_X:
106361- case BPF_JMP | BPF_JGE | BPF_K:
106362- case BPF_JMP | BPF_JGE | BPF_X:
106363- case BPF_JMP | BPF_JGT | BPF_K:
106364- case BPF_JMP | BPF_JGT | BPF_X:
106365- case BPF_JMP | BPF_JSET | BPF_K:
106366- case BPF_JMP | BPF_JSET | BPF_X:
106367- /* Both conditionals must be safe */
106368+ case BPF_S_JMP_JEQ_K:
106369+ case BPF_S_JMP_JEQ_X:
106370+ case BPF_S_JMP_JGE_K:
106371+ case BPF_S_JMP_JGE_X:
106372+ case BPF_S_JMP_JGT_K:
106373+ case BPF_S_JMP_JGT_X:
106374+ case BPF_S_JMP_JSET_X:
106375+ case BPF_S_JMP_JSET_K:
106376+ /* for conditionals both must be safe */
106377 if (pc + ftest->jt + 1 >= flen ||
106378 pc + ftest->jf + 1 >= flen)
106379 return -EINVAL;
106380 break;
106381- case BPF_LD | BPF_W | BPF_ABS:
106382- case BPF_LD | BPF_H | BPF_ABS:
106383- case BPF_LD | BPF_B | BPF_ABS:
106384+ case BPF_S_LD_W_ABS:
106385+ case BPF_S_LD_H_ABS:
106386+ case BPF_S_LD_B_ABS:
106387 anc_found = false;
106388- if (bpf_anc_helper(ftest) & BPF_ANC)
106389- anc_found = true;
106390- /* Ancillary operation unknown or unsupported */
106391+#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
106392+ code = BPF_S_ANC_##CODE; \
106393+ anc_found = true; \
106394+ break
106395+ switch (ftest->k) {
106396+ ANCILLARY(PROTOCOL);
106397+ ANCILLARY(PKTTYPE);
106398+ ANCILLARY(IFINDEX);
106399+ ANCILLARY(NLATTR);
106400+ ANCILLARY(NLATTR_NEST);
106401+ ANCILLARY(MARK);
106402+ ANCILLARY(QUEUE);
106403+ ANCILLARY(HATYPE);
106404+ ANCILLARY(RXHASH);
106405+ ANCILLARY(CPU);
106406+ ANCILLARY(ALU_XOR_X);
106407+ ANCILLARY(VLAN_TAG);
106408+ ANCILLARY(VLAN_TAG_PRESENT);
106409+ ANCILLARY(PAY_OFFSET);
106410+ }
106411+
106412+ /* ancillary operation unknown or unsupported */
106413 if (anc_found == false && ftest->k >= SKF_AD_OFF)
106414 return -EINVAL;
106415 }
106416+ ftest->code = code;
106417 }
106418
106419- /* Last instruction must be a RET code */
106420+ /* last instruction must be a RET code */
106421 switch (filter[flen - 1].code) {
106422- case BPF_RET | BPF_K:
106423- case BPF_RET | BPF_A:
106424+ case BPF_S_RET_K:
106425+ case BPF_S_RET_A:
106426 return check_load_and_stores(filter, flen);
106427 }
106428-
106429 return -EINVAL;
106430 }
106431 EXPORT_SYMBOL(sk_chk_filter);
106432
106433-static int sk_store_orig_filter(struct sk_filter *fp,
106434- const struct sock_fprog *fprog)
106435-{
106436- unsigned int fsize = sk_filter_proglen(fprog);
106437- struct sock_fprog_kern *fkprog;
106438-
106439- fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
106440- if (!fp->orig_prog)
106441- return -ENOMEM;
106442-
106443- fkprog = fp->orig_prog;
106444- fkprog->len = fprog->len;
106445- fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
106446- if (!fkprog->filter) {
106447- kfree(fp->orig_prog);
106448- return -ENOMEM;
106449- }
106450-
106451- return 0;
106452-}
106453-
106454-static void sk_release_orig_filter(struct sk_filter *fp)
106455-{
106456- struct sock_fprog_kern *fprog = fp->orig_prog;
106457-
106458- if (fprog) {
106459- kfree(fprog->filter);
106460- kfree(fprog);
106461- }
106462-}
106463-
106464 /**
106465 * sk_filter_release_rcu - Release a socket filter by rcu_head
106466 * @rcu: rcu_head that contains the sk_filter to free
106467 */
106468-static void sk_filter_release_rcu(struct rcu_head *rcu)
106469+void sk_filter_release_rcu(struct rcu_head *rcu)
106470 {
106471 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
106472
106473- sk_release_orig_filter(fp);
106474- sk_filter_free(fp);
106475-}
106476-
106477-/**
106478- * sk_filter_release - release a socket filter
106479- * @fp: filter to remove
106480- *
106481- * Remove a filter from a socket and release its resources.
106482- */
106483-static void sk_filter_release(struct sk_filter *fp)
106484-{
106485- if (atomic_dec_and_test(&fp->refcnt))
106486- call_rcu(&fp->rcu, sk_filter_release_rcu);
106487-}
106488-
106489-void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
106490-{
106491- atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
106492- sk_filter_release(fp);
106493-}
106494-
106495-void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
106496-{
106497- atomic_inc(&fp->refcnt);
106498- atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
106499-}
106500-
106501-static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
106502- struct sock *sk,
106503- unsigned int len)
106504-{
106505- struct sk_filter *fp_new;
106506-
106507- if (sk == NULL)
106508- return krealloc(fp, len, GFP_KERNEL);
106509-
106510- fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
106511- if (fp_new) {
106512- *fp_new = *fp;
106513- /* As we're keeping orig_prog in fp_new along,
106514- * we need to make sure we're not evicting it
106515- * from the old fp.
106516- */
106517- fp->orig_prog = NULL;
106518- sk_filter_uncharge(sk, fp);
106519- }
106520-
106521- return fp_new;
106522-}
106523-
106524-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
106525- struct sock *sk)
106526-{
106527- struct sock_filter *old_prog;
106528- struct sk_filter *old_fp;
106529- int err, new_len, old_len = fp->len;
106530-
106531- /* We are free to overwrite insns et al right here as it
106532- * won't be used at this point in time anymore internally
106533- * after the migration to the internal BPF instruction
106534- * representation.
106535- */
106536- BUILD_BUG_ON(sizeof(struct sock_filter) !=
106537- sizeof(struct sock_filter_int));
106538-
106539- /* Conversion cannot happen on overlapping memory areas,
106540- * so we need to keep the user BPF around until the 2nd
106541- * pass. At this time, the user BPF is stored in fp->insns.
106542- */
106543- old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
106544- GFP_KERNEL);
106545- if (!old_prog) {
106546- err = -ENOMEM;
106547- goto out_err;
106548- }
106549-
106550- /* 1st pass: calculate the new program length. */
106551- err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
106552- if (err)
106553- goto out_err_free;
106554-
106555- /* Expand fp for appending the new filter representation. */
106556- old_fp = fp;
106557- fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
106558- if (!fp) {
106559- /* The old_fp is still around in case we couldn't
106560- * allocate new memory, so uncharge on that one.
106561- */
106562- fp = old_fp;
106563- err = -ENOMEM;
106564- goto out_err_free;
106565- }
106566-
106567- fp->len = new_len;
106568-
106569- /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
106570- err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
106571- if (err)
106572- /* 2nd sk_convert_filter() can fail only if it fails
106573- * to allocate memory, remapping must succeed. Note,
106574- * that at this time old_fp has already been released
106575- * by __sk_migrate_realloc().
106576- */
106577- goto out_err_free;
106578-
106579- sk_filter_select_runtime(fp);
106580-
106581- kfree(old_prog);
106582- return fp;
106583-
106584-out_err_free:
106585- kfree(old_prog);
106586-out_err:
106587- /* Rollback filter setup. */
106588- if (sk != NULL)
106589- sk_filter_uncharge(sk, fp);
106590- else
106591- kfree(fp);
106592- return ERR_PTR(err);
106593-}
106594-
106595-void __weak bpf_int_jit_compile(struct sk_filter *prog)
106596-{
106597-}
106598-
106599-/**
106600- * sk_filter_select_runtime - select execution runtime for BPF program
106601- * @fp: sk_filter populated with internal BPF program
106602- *
106603- * try to JIT internal BPF program, if JIT is not available select interpreter
106604- * BPF program will be executed via SK_RUN_FILTER() macro
106605- */
106606-void sk_filter_select_runtime(struct sk_filter *fp)
106607-{
106608- fp->bpf_func = (void *) __sk_run_filter;
106609-
106610- /* Probe if internal BPF can be JITed */
106611- bpf_int_jit_compile(fp);
106612-}
106613-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
106614-
106615-/* free internal BPF program */
106616-void sk_filter_free(struct sk_filter *fp)
106617-{
106618 bpf_jit_free(fp);
106619 }
106620-EXPORT_SYMBOL_GPL(sk_filter_free);
106621+EXPORT_SYMBOL(sk_filter_release_rcu);
106622
106623-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
106624- struct sock *sk)
106625+static int __sk_prepare_filter(struct sk_filter *fp)
106626 {
106627 int err;
106628
106629- fp->bpf_func = NULL;
106630- fp->jited = 0;
106631+ fp->bpf_func = sk_run_filter;
106632
106633 err = sk_chk_filter(fp->insns, fp->len);
106634- if (err) {
106635- if (sk != NULL)
106636- sk_filter_uncharge(sk, fp);
106637- else
106638- kfree(fp);
106639- return ERR_PTR(err);
106640- }
106641+ if (err)
106642+ return err;
106643
106644- /* Probe if we can JIT compile the filter and if so, do
106645- * the compilation of the filter.
106646- */
106647 bpf_jit_compile(fp);
106648-
106649- /* JIT compiler couldn't process this filter, so do the
106650- * internal BPF translation for the optimized interpreter.
106651- */
106652- if (!fp->jited)
106653- fp = __sk_migrate_filter(fp, sk);
106654-
106655- return fp;
106656+ return 0;
106657 }
106658
106659 /**
106660 * sk_unattached_filter_create - create an unattached filter
106661+ * @fprog: the filter program
106662 * @pfp: the unattached filter that is created
106663- * @fprog: the filter program
106664 *
106665 * Create a filter independent of any socket. We first run some
106666 * sanity checks on it to make sure it does not explode on us later.
106667@@ -1533,10 +671,11 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
106668 * a negative errno code is returned. On success the return is zero.
106669 */
106670 int sk_unattached_filter_create(struct sk_filter **pfp,
106671- struct sock_fprog_kern *fprog)
106672+ struct sock_fprog *fprog)
106673 {
106674- unsigned int fsize = sk_filter_proglen(fprog);
106675 struct sk_filter *fp;
106676+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
106677+ int err;
106678
106679 /* Make sure new filter is there and in the right amounts. */
106680 if (fprog->filter == NULL)
106681@@ -1545,26 +684,20 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
106682 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
106683 if (!fp)
106684 return -ENOMEM;
106685-
106686- memcpy(fp->insns, fprog->filter, fsize);
106687+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
106688
106689 atomic_set(&fp->refcnt, 1);
106690 fp->len = fprog->len;
106691- /* Since unattached filters are not copied back to user
106692- * space through sk_get_filter(), we do not need to hold
106693- * a copy here, and can spare us the work.
106694- */
106695- fp->orig_prog = NULL;
106696
106697- /* __sk_prepare_filter() already takes care of uncharging
106698- * memory in case something goes wrong.
106699- */
106700- fp = __sk_prepare_filter(fp, NULL);
106701- if (IS_ERR(fp))
106702- return PTR_ERR(fp);
106703+ err = __sk_prepare_filter(fp);
106704+ if (err)
106705+ goto free_mem;
106706
106707 *pfp = fp;
106708 return 0;
106709+free_mem:
106710+ kfree(fp);
106711+ return err;
106712 }
106713 EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
106714
106715@@ -1587,7 +720,7 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
106716 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
106717 {
106718 struct sk_filter *fp, *old_fp;
106719- unsigned int fsize = sk_filter_proglen(fprog);
106720+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
106721 unsigned int sk_fsize = sk_filter_size(fprog->len);
106722 int err;
106723
106724@@ -1601,7 +734,6 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
106725 fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
106726 if (!fp)
106727 return -ENOMEM;
106728-
106729 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
106730 sock_kfree_s(sk, fp, sk_fsize);
106731 return -EFAULT;
106732@@ -1610,26 +742,18 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
106733 atomic_set(&fp->refcnt, 1);
106734 fp->len = fprog->len;
106735
106736- err = sk_store_orig_filter(fp, fprog);
106737+ err = __sk_prepare_filter(fp);
106738 if (err) {
106739 sk_filter_uncharge(sk, fp);
106740- return -ENOMEM;
106741+ return err;
106742 }
106743
106744- /* __sk_prepare_filter() already takes care of uncharging
106745- * memory in case something goes wrong.
106746- */
106747- fp = __sk_prepare_filter(fp, sk);
106748- if (IS_ERR(fp))
106749- return PTR_ERR(fp);
106750-
106751 old_fp = rcu_dereference_protected(sk->sk_filter,
106752 sock_owned_by_user(sk));
106753 rcu_assign_pointer(sk->sk_filter, fp);
106754
106755 if (old_fp)
106756 sk_filter_uncharge(sk, old_fp);
106757-
106758 return 0;
106759 }
106760 EXPORT_SYMBOL_GPL(sk_attach_filter);
106761@@ -1649,46 +773,116 @@ int sk_detach_filter(struct sock *sk)
106762 sk_filter_uncharge(sk, filter);
106763 ret = 0;
106764 }
106765-
106766 return ret;
106767 }
106768 EXPORT_SYMBOL_GPL(sk_detach_filter);
106769
106770-int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
106771- unsigned int len)
106772+void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
106773+{
106774+ static const u16 decodes[] = {
106775+ [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
106776+ [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
106777+ [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
106778+ [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
106779+ [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
106780+ [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
106781+ [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
106782+ [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
106783+ [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
106784+ [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
106785+ [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
106786+ [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
106787+ [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
106788+ [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
106789+ [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
106790+ [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
106791+ [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
106792+ [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
106793+ [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
106794+ [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
106795+ [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
106796+ [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
106797+ [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
106798+ [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
106799+ [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
106800+ [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
106801+ [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
106802+ [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
106803+ [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
106804+ [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
106805+ [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
106806+ [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
106807+ [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
106808+ [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
106809+ [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
106810+ [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
106811+ [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
106812+ [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
106813+ [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
106814+ [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
106815+ [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
106816+ [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
106817+ [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
106818+ [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
106819+ [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
106820+ [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
106821+ [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
106822+ [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
106823+ [BPF_S_RET_K] = BPF_RET|BPF_K,
106824+ [BPF_S_RET_A] = BPF_RET|BPF_A,
106825+ [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
106826+ [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
106827+ [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
106828+ [BPF_S_ST] = BPF_ST,
106829+ [BPF_S_STX] = BPF_STX,
106830+ [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
106831+ [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
106832+ [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
106833+ [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
106834+ [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
106835+ [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
106836+ [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
106837+ [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
106838+ [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
106839+ };
106840+ u16 code;
106841+
106842+ code = filt->code;
106843+
106844+ to->code = decodes[code];
106845+ to->jt = filt->jt;
106846+ to->jf = filt->jf;
106847+ to->k = filt->k;
106848+}
106849+
106850+int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
106851 {
106852- struct sock_fprog_kern *fprog;
106853 struct sk_filter *filter;
106854- int ret = 0;
106855+ int i, ret;
106856
106857 lock_sock(sk);
106858 filter = rcu_dereference_protected(sk->sk_filter,
106859- sock_owned_by_user(sk));
106860+ sock_owned_by_user(sk));
106861+ ret = 0;
106862 if (!filter)
106863 goto out;
106864-
106865- /* We're copying the filter that has been originally attached,
106866- * so no conversion/decode needed anymore.
106867- */
106868- fprog = filter->orig_prog;
106869-
106870- ret = fprog->len;
106871+ ret = filter->len;
106872 if (!len)
106873- /* User space only enquires number of filter blocks. */
106874 goto out;
106875-
106876 ret = -EINVAL;
106877- if (len < fprog->len)
106878+ if (len < filter->len)
106879 goto out;
106880
106881 ret = -EFAULT;
106882- if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
106883- goto out;
106884+ for (i = 0; i < filter->len; i++) {
106885+ struct sock_filter fb;
106886
106887- /* Instead of bytes, the API requests to return the number
106888- * of filter blocks.
106889- */
106890- ret = fprog->len;
106891+ sk_decode_filter(&filter->insns[i], &fb);
106892+ if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
106893+ goto out;
106894+ }
106895+
106896+ ret = filter->len;
106897 out:
106898 release_sock(sk);
106899 return ret;
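The filter.c hunks above revert sk_chk_filter() from the upstream "chk_code_allowed() on raw opcodes" scheme back to rewriting each opcode into an internal BPF_S_* value through a dense lookup table indexed by the 16-bit instruction code. Both schemes exploit the same property of classic BPF encoding: an opcode is an OR-composition of small class/size/mode fields, so a flat array indexed by the opcode answers "is this instruction valid?" in O(1). A minimal userspace sketch of that idea (the BPF_* constants below mirror <linux/filter.h>; the table is deliberately abbreviated):

#include <stdbool.h>
#include <stdio.h>

/* Classic BPF opcode fields, as defined in <linux/filter.h>. */
#define BPF_LD   0x00
#define BPF_ALU  0x04
#define BPF_JMP  0x05
#define BPF_RET  0x06
#define BPF_H    0x08
#define BPF_ABS  0x20
#define BPF_ADD  0x00
#define BPF_JEQ  0x10
#define BPF_K    0x00
#define BPF_A    0x10

/* Dense validity table: valid opcodes map to true; everything else is
 * implicitly zero-initialized and therefore rejected.  Abbreviated. */
static const bool valid[256] = {
	[BPF_LD  | BPF_H   | BPF_ABS] = true,	/* ldh [k] */
	[BPF_ALU | BPF_ADD | BPF_K]   = true,	/* A += k */
	[BPF_JMP | BPF_JEQ | BPF_K]   = true,	/* jeq #k,jt,jf */
	[BPF_RET | BPF_K]             = true,	/* ret #k */
	[BPF_RET | BPF_A]             = true,	/* ret a */
};

static bool chk_code(unsigned int code)
{
	return code < sizeof(valid) && valid[code];
}

int main(void)
{
	printf("0x28 (ldh [k]): %d\n", chk_code(BPF_LD | BPF_H | BPF_ABS));
	printf("0xff (bogus):   %d\n", chk_code(0xffu));
	return 0;
}

The grsecurity variant additionally stores the looked-up BPF_S_* value back into ftest->code, so later passes (and sk_decode_filter() further down) operate on the internal encoding rather than the raw one.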
106900diff --git a/net/core/flow.c b/net/core/flow.c
106901index a0348fd..6951c76 100644
106902--- a/net/core/flow.c
106903+++ b/net/core/flow.c
106904@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
106905 static int flow_entry_valid(struct flow_cache_entry *fle,
106906 struct netns_xfrm *xfrm)
106907 {
106908- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
106909+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
106910 return 0;
106911 if (fle->object && !fle->object->ops->check(fle->object))
106912 return 0;
106913@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
106914 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
106915 fcp->hash_count++;
106916 }
106917- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
106918+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
106919 flo = fle->object;
106920 if (!flo)
106921 goto ret_object;
106922@@ -263,7 +263,7 @@ nocache:
106923 }
106924 flo = resolver(net, key, family, dir, flo, ctx);
106925 if (fle) {
106926- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
106927+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
106928 if (!IS_ERR(flo))
106929 fle->object = flo;
106930 else
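The flow-cache hunks stamp each entry with the generation id current at fill time; bumping the global counter invalidates every cached entry at once without walking the cache. Because such a counter may legitimately wrap, the patch moves it to the atomic_*_unchecked() family, which grsecurity/PaX exempts from REFCOUNT overflow detection. A small C11 sketch of the generation-counter idiom (names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint cache_genid;		/* global generation counter */

struct entry {
	unsigned int genid;		/* generation the entry was filled under */
	int value;
};

static void entry_fill(struct entry *e, int value)
{
	e->genid = atomic_load(&cache_genid);
	e->value = value;
}

static bool entry_valid(const struct entry *e)
{
	/* stale as soon as anyone bumps cache_genid */
	return e->genid == atomic_load(&cache_genid);
}

int main(void)
{
	struct entry e;

	entry_fill(&e, 42);
	printf("valid before flush: %d\n", entry_valid(&e));
	atomic_fetch_add(&cache_genid, 1);	/* flush everything */
	printf("valid after flush:  %d\n", entry_valid(&e));
	return 0;
}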
106931diff --git a/net/core/iovec.c b/net/core/iovec.c
106932index e1ec45a..e5c6f16 100644
106933--- a/net/core/iovec.c
106934+++ b/net/core/iovec.c
106935@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
106936 if (m->msg_name && m->msg_namelen) {
106937 if (mode == VERIFY_READ) {
106938 void __user *namep;
106939- namep = (void __user __force *) m->msg_name;
106940+ namep = (void __force_user *) m->msg_name;
106941 err = move_addr_to_kernel(namep, m->msg_namelen,
106942 address);
106943 if (err < 0)
106944@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
106945 }
106946
106947 size = m->msg_iovlen * sizeof(struct iovec);
106948- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
106949+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
106950 return -EFAULT;
106951
106952 m->msg_iov = iov;
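These iovec.c changes only respell the casts: upstream writes (void __user __force *) while grsecurity folds the two sparse annotations into a single __force_user macro, so every cross-address-space cast looks the same and can be grepped for. A sketch of the machinery involved (assumption: the definitions below mirror the kernel's <linux/compiler.h> and the patch's additions to it; only the sparse static checker interprets the attributes, a regular compiler sees empty macros):

/* Only meaningful when sparse (__CHECKER__) runs over the code. */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

/* grsecurity's combined spelling (assumed definition): */
#define __force_user   __force __user

static void demo(void *kernel_ptr)
{
	/* Moving a pointer across address spaces must be explicit;
	 * without __force, sparse would warn about this cast. */
	void __user *up = (void __force_user *)kernel_ptr;
	(void)up;
}

int main(void)
{
	demo((void *)0);
	return 0;
}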
106953diff --git a/net/core/neighbour.c b/net/core/neighbour.c
106954index ef31fef..8be66d9 100644
106955--- a/net/core/neighbour.c
106956+++ b/net/core/neighbour.c
106957@@ -2825,7 +2825,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
106958 void __user *buffer, size_t *lenp, loff_t *ppos)
106959 {
106960 int size, ret;
106961- struct ctl_table tmp = *ctl;
106962+ ctl_table_no_const tmp = *ctl;
106963
106964 tmp.extra1 = &zero;
106965 tmp.extra2 = &unres_qlen_max;
106966@@ -2887,7 +2887,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
106967 void __user *buffer,
106968 size_t *lenp, loff_t *ppos)
106969 {
106970- struct ctl_table tmp = *ctl;
106971+ ctl_table_no_const tmp = *ctl;
106972 int ret;
106973
106974 tmp.extra1 = &zero;
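The ctl_table_no_const substitutions here are part of the constification scheme: globally registered sysctl tables become const (enforced by the gcc constify plugin), while handlers that need a tweaked per-call copy use an unconstified stack duplicate. A reduced sketch of the pattern (the struct layout and the typedef are illustrative; the real ctl_table lives in <linux/sysctl.h>):

struct ctl_table {
	const char *procname;
	void *data;
	void *extra1, *extra2;
};

/* Same layout, but not forced read-only by the constify plugin. */
typedef struct ctl_table ctl_table_no_const;

static int zero;

static int proc_handler(const struct ctl_table *ctl)
{
	/* The registered table stays read-only; per-call clamp values
	 * go through a writable stack copy instead. */
	ctl_table_no_const tmp = *ctl;

	tmp.extra1 = &zero;
	/* ... hand &tmp to the generic proc_dointvec-style helper ... */
	(void)tmp;
	return 0;
}

int main(void)
{
	struct ctl_table t = { "unres_qlen", 0, 0, 0 };
	return proc_handler(&t);
}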
106975diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
106976index 2bf8329..2eb1423 100644
106977--- a/net/core/net-procfs.c
106978+++ b/net/core/net-procfs.c
106979@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
106980 struct rtnl_link_stats64 temp;
106981 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
106982
106983- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
106984+ if (gr_proc_is_restricted())
106985+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
106986+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
106987+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
106988+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
106989+ else
106990+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
106991 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
106992 dev->name, stats->rx_bytes, stats->rx_packets,
106993 stats->rx_errors,
106994@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
106995 return 0;
106996 }
106997
106998-static const struct seq_operations dev_seq_ops = {
106999+const struct seq_operations dev_seq_ops = {
107000 .start = dev_seq_start,
107001 .next = dev_seq_next,
107002 .stop = dev_seq_stop,
107003@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
107004
107005 static int softnet_seq_open(struct inode *inode, struct file *file)
107006 {
107007- return seq_open(file, &softnet_seq_ops);
107008+ return seq_open_restrict(file, &softnet_seq_ops);
107009 }
107010
107011 static const struct file_operations softnet_seq_fops = {
107012@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
107013 else
107014 seq_printf(seq, "%04x", ntohs(pt->type));
107015
107016+#ifdef CONFIG_GRKERNSEC_HIDESYM
107017+ seq_printf(seq, " %-8s %pf\n",
107018+ pt->dev ? pt->dev->name : "", NULL);
107019+#else
107020 seq_printf(seq, " %-8s %pf\n",
107021 pt->dev ? pt->dev->name : "", pt->func);
107022+#endif
107023 }
107024
107025 return 0;
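Under CONFIG_GRKERNSEC_HIDESYM the ptype dump above prints NULL instead of the handler's function pointer, since the %pf output would otherwise leak kernel text addresses to readers of /proc/net/ptype. A userspace sketch of the redaction (show_handler and the flag are illustrative placeholders):

#include <stdio.h>

static void show_handler(FILE *seq, const char *dev,
			 void (*func)(void), int hidesym)
{
	/* Redact the pointer when symbol hiding is on. */
	fprintf(seq, " %-8s %p\n", dev, hidesym ? NULL : (void *)func);
}

static void demo_handler(void) { }

int main(void)
{
	show_handler(stdout, "eth0", demo_handler, 0);	/* real address */
	show_handler(stdout, "eth0", demo_handler, 1);	/* redacted */
	return 0;
}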
107026diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
107027index 1cac29e..fb482f3 100644
107028--- a/net/core/net-sysfs.c
107029+++ b/net/core/net-sysfs.c
107030@@ -259,7 +259,7 @@ static ssize_t carrier_changes_show(struct device *dev,
107031 {
107032 struct net_device *netdev = to_net_dev(dev);
107033 return sprintf(buf, fmt_dec,
107034- atomic_read(&netdev->carrier_changes));
107035+ atomic_read_unchecked(&netdev->carrier_changes));
107036 }
107037 static DEVICE_ATTR_RO(carrier_changes);
107038
107039diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
107040index 85b6269..fc77ea0 100644
107041--- a/net/core/net_namespace.c
107042+++ b/net/core/net_namespace.c
107043@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
107044 int error;
107045 LIST_HEAD(net_exit_list);
107046
107047- list_add_tail(&ops->list, list);
107048+ pax_list_add_tail((struct list_head *)&ops->list, list);
107049 if (ops->init || (ops->id && ops->size)) {
107050 for_each_net(net) {
107051 error = ops_init(ops, net);
107052@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
107053
107054 out_undo:
107055 /* If I have an error cleanup all namespaces I initialized */
107056- list_del(&ops->list);
107057+ pax_list_del((struct list_head *)&ops->list);
107058 ops_exit_list(ops, &net_exit_list);
107059 ops_free_list(ops, &net_exit_list);
107060 return error;
107061@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
107062 struct net *net;
107063 LIST_HEAD(net_exit_list);
107064
107065- list_del(&ops->list);
107066+ pax_list_del((struct list_head *)&ops->list);
107067 for_each_net(net)
107068 list_add_tail(&net->exit_list, &net_exit_list);
107069 ops_exit_list(ops, &net_exit_list);
107070@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
107071 mutex_lock(&net_mutex);
107072 error = register_pernet_operations(&pernet_list, ops);
107073 if (!error && (first_device == &pernet_list))
107074- first_device = &ops->list;
107075+ first_device = (struct list_head *)&ops->list;
107076 mutex_unlock(&net_mutex);
107077 return error;
107078 }
107079diff --git a/net/core/netpoll.c b/net/core/netpoll.c
107080index e33937f..b2b4981 100644
107081--- a/net/core/netpoll.c
107082+++ b/net/core/netpoll.c
107083@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
107084 struct udphdr *udph;
107085 struct iphdr *iph;
107086 struct ethhdr *eth;
107087- static atomic_t ip_ident;
107088+ static atomic_unchecked_t ip_ident;
107089 struct ipv6hdr *ip6h;
107090
107091 udp_len = len + sizeof(*udph);
107092@@ -453,7 +453,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
107093 put_unaligned(0x45, (unsigned char *)iph);
107094 iph->tos = 0;
107095 put_unaligned(htons(ip_len), &(iph->tot_len));
107096- iph->id = htons(atomic_inc_return(&ip_ident));
107097+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
107098 iph->frag_off = 0;
107099 iph->ttl = 64;
107100 iph->protocol = IPPROTO_UDP;
107101diff --git a/net/core/pktgen.c b/net/core/pktgen.c
107102index fc17a9d..d4a3d88 100644
107103--- a/net/core/pktgen.c
107104+++ b/net/core/pktgen.c
107105@@ -3725,7 +3725,7 @@ static int __net_init pg_net_init(struct net *net)
107106 pn->net = net;
107107 INIT_LIST_HEAD(&pn->pktgen_threads);
107108 pn->pktgen_exiting = false;
107109- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
107110+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
107111 if (!pn->proc_dir) {
107112 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
107113 return -ENODEV;
107114diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
107115deleted file mode 100644
107116index d3027a7..0000000
107117--- a/net/core/ptp_classifier.c
107118+++ /dev/null
107119@@ -1,141 +0,0 @@
107120-/* PTP classifier
107121- *
107122- * This program is free software; you can redistribute it and/or
107123- * modify it under the terms of version 2 of the GNU General Public
107124- * License as published by the Free Software Foundation.
107125- *
107126- * This program is distributed in the hope that it will be useful, but
107127- * WITHOUT ANY WARRANTY; without even the implied warranty of
107128- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
107129- * General Public License for more details.
107130- */
107131-
107132-/* The below program is the bpf_asm (tools/net/) representation of
107133- * the opcode array in the ptp_filter structure.
107134- *
107135- * For convenience, this can easily be altered and reviewed with
107136- * bpf_asm and bpf_dbg, e.g. `./bpf_asm -c prog` where prog is a
107137- * simple file containing the below program:
107138- *
107139- * ldh [12] ; load ethertype
107140- *
107141- * ; PTP over UDP over IPv4 over Ethernet
107142- * test_ipv4:
107143- * jneq #0x800, test_ipv6 ; ETH_P_IP ?
107144- * ldb [23] ; load proto
107145- * jneq #17, drop_ipv4 ; IPPROTO_UDP ?
107146- * ldh [20] ; load frag offset field
107147- * jset #0x1fff, drop_ipv4 ; don't allow fragments
107148- * ldxb 4*([14]&0xf) ; load IP header len
107149- * ldh [x + 16] ; load UDP dst port
107150- * jneq #319, drop_ipv4 ; is port PTP_EV_PORT ?
107151- * ldh [x + 22] ; load payload
107152- * and #0xf ; mask PTP_CLASS_VMASK
107153- * or #0x10 ; PTP_CLASS_IPV4
107154- * ret a ; return PTP class
107155- * drop_ipv4: ret #0x0 ; PTP_CLASS_NONE
107156- *
107157- * ; PTP over UDP over IPv6 over Ethernet
107158- * test_ipv6:
107159- * jneq #0x86dd, test_8021q ; ETH_P_IPV6 ?
107160- * ldb [20] ; load proto
107161- * jneq #17, drop_ipv6 ; IPPROTO_UDP ?
107162- * ldh [56] ; load UDP dst port
107163- * jneq #319, drop_ipv6 ; is port PTP_EV_PORT ?
107164- * ldh [62] ; load payload
107165- * and #0xf ; mask PTP_CLASS_VMASK
107166- * or #0x20 ; PTP_CLASS_IPV6
107167- * ret a ; return PTP class
107168- * drop_ipv6: ret #0x0 ; PTP_CLASS_NONE
107169- *
107170- * ; PTP over 802.1Q over Ethernet
107171- * test_8021q:
107172- * jneq #0x8100, test_ieee1588 ; ETH_P_8021Q ?
107173- * ldh [16] ; load inner type
107174- * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ?
107175- * ldb [18] ; load payload
107176- * and #0x8 ; as we don't have ports here, test
107177- * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these
107178- * ldh [18] ; reload payload
107179- * and #0xf ; mask PTP_CLASS_VMASK
107180- * or #0x40 ; PTP_CLASS_V2_VLAN
107181- * ret a ; return PTP class
107182- *
107183- * ; PTP over Ethernet
107184- * test_ieee1588:
107185- * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ?
107186- * ldb [14] ; load payload
107187- * and #0x8 ; as we don't have ports here, test
107188- * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these
107189- * ldh [14] ; reload payload
107190- * and #0xf ; mask PTP_CLASS_VMASK
107191- * or #0x30 ; PTP_CLASS_L2
107192- * ret a ; return PTP class
107193- * drop_ieee1588: ret #0x0 ; PTP_CLASS_NONE
107194- */
107195-
107196-#include <linux/skbuff.h>
107197-#include <linux/filter.h>
107198-#include <linux/ptp_classify.h>
107199-
107200-static struct sk_filter *ptp_insns __read_mostly;
107201-
107202-unsigned int ptp_classify_raw(const struct sk_buff *skb)
107203-{
107204- return SK_RUN_FILTER(ptp_insns, skb);
107205-}
107206-EXPORT_SYMBOL_GPL(ptp_classify_raw);
107207-
107208-void __init ptp_classifier_init(void)
107209-{
107210- static struct sock_filter ptp_filter[] __initdata = {
107211- { 0x28, 0, 0, 0x0000000c },
107212- { 0x15, 0, 12, 0x00000800 },
107213- { 0x30, 0, 0, 0x00000017 },
107214- { 0x15, 0, 9, 0x00000011 },
107215- { 0x28, 0, 0, 0x00000014 },
107216- { 0x45, 7, 0, 0x00001fff },
107217- { 0xb1, 0, 0, 0x0000000e },
107218- { 0x48, 0, 0, 0x00000010 },
107219- { 0x15, 0, 4, 0x0000013f },
107220- { 0x48, 0, 0, 0x00000016 },
107221- { 0x54, 0, 0, 0x0000000f },
107222- { 0x44, 0, 0, 0x00000010 },
107223- { 0x16, 0, 0, 0x00000000 },
107224- { 0x06, 0, 0, 0x00000000 },
107225- { 0x15, 0, 9, 0x000086dd },
107226- { 0x30, 0, 0, 0x00000014 },
107227- { 0x15, 0, 6, 0x00000011 },
107228- { 0x28, 0, 0, 0x00000038 },
107229- { 0x15, 0, 4, 0x0000013f },
107230- { 0x28, 0, 0, 0x0000003e },
107231- { 0x54, 0, 0, 0x0000000f },
107232- { 0x44, 0, 0, 0x00000020 },
107233- { 0x16, 0, 0, 0x00000000 },
107234- { 0x06, 0, 0, 0x00000000 },
107235- { 0x15, 0, 9, 0x00008100 },
107236- { 0x28, 0, 0, 0x00000010 },
107237- { 0x15, 0, 15, 0x000088f7 },
107238- { 0x30, 0, 0, 0x00000012 },
107239- { 0x54, 0, 0, 0x00000008 },
107240- { 0x15, 0, 12, 0x00000000 },
107241- { 0x28, 0, 0, 0x00000012 },
107242- { 0x54, 0, 0, 0x0000000f },
107243- { 0x44, 0, 0, 0x00000040 },
107244- { 0x16, 0, 0, 0x00000000 },
107245- { 0x15, 0, 7, 0x000088f7 },
107246- { 0x30, 0, 0, 0x0000000e },
107247- { 0x54, 0, 0, 0x00000008 },
107248- { 0x15, 0, 4, 0x00000000 },
107249- { 0x28, 0, 0, 0x0000000e },
107250- { 0x54, 0, 0, 0x0000000f },
107251- { 0x44, 0, 0, 0x00000030 },
107252- { 0x16, 0, 0, 0x00000000 },
107253- { 0x06, 0, 0, 0x00000000 },
107254- };
107255- struct sock_fprog_kern ptp_prog = {
107256- .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
107257- };
107258-
107259- BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
107260-}
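The opcode array deleted above is just the assembled form of the bpf_asm listing in the file's header comment; each { code, jt, jf, k } quadruple is a struct sock_filter. The field masks from <linux/filter.h> make the raw numbers straightforward to decode by hand:

#include <stdio.h>

/* Opcode field extractors, as in <linux/filter.h>. */
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_SIZE(code)  ((code) & 0x18)
#define BPF_MODE(code)  ((code) & 0xe0)

int main(void)
{
	/* { 0x28, 0, 0, 0x0000000c } is "ldh [12]":
	 * 0x28 = BPF_LD(0x00) | BPF_H(0x08) | BPF_ABS(0x20). */
	printf("0x28: class=%#x size=%#x mode=%#x\n",
	       BPF_CLASS(0x28), BPF_SIZE(0x28), BPF_MODE(0x28));

	/* { 0x15, 0, 12, 0x00000800 } is "jneq #0x800, +12":
	 * 0x15 = BPF_JMP(0x05) | BPF_JEQ(0x10) | BPF_K(0x00), with
	 * jt=0 (fall through on equal) and jf=12 (skip on not-equal). */
	printf("0x15: class=%#x\n", BPF_CLASS(0x15));
	return 0;
}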
107261diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
107262index 1063996..0729c19 100644
107263--- a/net/core/rtnetlink.c
107264+++ b/net/core/rtnetlink.c
107265@@ -58,7 +58,7 @@ struct rtnl_link {
107266 rtnl_doit_func doit;
107267 rtnl_dumpit_func dumpit;
107268 rtnl_calcit_func calcit;
107269-};
107270+} __no_const;
107271
107272 static DEFINE_MUTEX(rtnl_mutex);
107273
107274@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
107275 if (rtnl_link_ops_get(ops->kind))
107276 return -EEXIST;
107277
107278- if (!ops->dellink)
107279- ops->dellink = unregister_netdevice_queue;
107280+ if (!ops->dellink) {
107281+ pax_open_kernel();
107282+ *(void **)&ops->dellink = unregister_netdevice_queue;
107283+ pax_close_kernel();
107284+ }
107285
107286- list_add_tail(&ops->list, &link_ops);
107287+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
107288 return 0;
107289 }
107290 EXPORT_SYMBOL_GPL(__rtnl_link_register);
107291@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
107292 for_each_net(net) {
107293 __rtnl_kill_links(net, ops);
107294 }
107295- list_del(&ops->list);
107296+ pax_list_del((struct list_head *)&ops->list);
107297 }
107298 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
107299
107300@@ -1008,7 +1011,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
107301 (dev->ifalias &&
107302 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
107303 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
107304- atomic_read(&dev->carrier_changes)))
107305+ atomic_read_unchecked(&dev->carrier_changes)))
107306 goto nla_put_failure;
107307
107308 if (1) {
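__rtnl_link_register() above now writes the default dellink hook through a pax_open_kernel()/pax_close_kernel() bracket because the ops structure has been made read-only. The kernel primitives toggle write protection only for the duration of the update; a rough userspace analogue using mprotect() (a sketch only, the real primitives flip CR0.WP or PTE bits rather than calling mprotect):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static long pagesz;
static char *page;

/* Userspace stand-ins for pax_open_kernel()/pax_close_kernel(). */
static void open_kernel(void)
{
	mprotect(page, pagesz, PROT_READ | PROT_WRITE);
}

static void close_kernel(void)
{
	mprotect(page, pagesz, PROT_READ);
}

int main(void)
{
	pagesz = sysconf(_SC_PAGESIZE);
	page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	strcpy(page, "ops table");
	close_kernel();			/* default state: read-only */

	open_kernel();			/* explicit, bracketed update */
	strcpy(page, "ops table v2");
	close_kernel();

	printf("%s\n", page);
	return 0;
}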
107309diff --git a/net/core/scm.c b/net/core/scm.c
107310index b442e7e..6f5b5a2 100644
107311--- a/net/core/scm.c
107312+++ b/net/core/scm.c
107313@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
107314 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
107315 {
107316 struct cmsghdr __user *cm
107317- = (__force struct cmsghdr __user *)msg->msg_control;
107318+ = (struct cmsghdr __force_user *)msg->msg_control;
107319 struct cmsghdr cmhdr;
107320 int cmlen = CMSG_LEN(len);
107321 int err;
107322@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
107323 err = -EFAULT;
107324 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
107325 goto out;
107326- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
107327+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
107328 goto out;
107329 cmlen = CMSG_SPACE(len);
107330 if (msg->msg_controllen < cmlen)
107331@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
107332 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
107333 {
107334 struct cmsghdr __user *cm
107335- = (__force struct cmsghdr __user*)msg->msg_control;
107336+ = (struct cmsghdr __force_user *)msg->msg_control;
107337
107338 int fdmax = 0;
107339 int fdnum = scm->fp->count;
107340@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
107341 if (fdnum < fdmax)
107342 fdmax = fdnum;
107343
107344- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
107345+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
107346 i++, cmfptr++)
107347 {
107348 struct socket *sock;
107349diff --git a/net/core/skbuff.c b/net/core/skbuff.c
107350index 58ff88e..af9b458 100644
107351--- a/net/core/skbuff.c
107352+++ b/net/core/skbuff.c
107353@@ -2010,7 +2010,7 @@ EXPORT_SYMBOL(__skb_checksum);
107354 __wsum skb_checksum(const struct sk_buff *skb, int offset,
107355 int len, __wsum csum)
107356 {
107357- const struct skb_checksum_ops ops = {
107358+ static const struct skb_checksum_ops ops = {
107359 .update = csum_partial_ext,
107360 .combine = csum_block_add_ext,
107361 };
107362@@ -3233,13 +3233,15 @@ void __init skb_init(void)
107363 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
107364 sizeof(struct sk_buff),
107365 0,
107366- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
107367+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
107368+ SLAB_NO_SANITIZE,
107369 NULL);
107370 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
107371 (2*sizeof(struct sk_buff)) +
107372 sizeof(atomic_t),
107373 0,
107374- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
107375+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
107376+ SLAB_NO_SANITIZE,
107377 NULL);
107378 }
107379
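SLAB_NO_SANITIZE exempts the two hot skb caches from grsecurity's sanitize-on-free feature, trading the wipe of freed objects for performance on the network fast path. Conceptually the feature amounts to the following (an illustrative sketch, not the slab allocator's real code):

#include <stdlib.h>
#include <string.h>

struct cache {
	size_t objsize;
	int no_sanitize;	/* set for caches like skbuff_head_cache */
};

static void cache_free(struct cache *c, void *obj)
{
	if (!c->no_sanitize)
		memset(obj, 0, c->objsize);	/* wipe stale data on free */
	free(obj);
}

int main(void)
{
	struct cache skb_cache = { .objsize = 256, .no_sanitize = 1 };
	void *obj = malloc(skb_cache.objsize);

	cache_free(&skb_cache, obj);
	return 0;
}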
107380diff --git a/net/core/sock.c b/net/core/sock.c
107381index 026e01f..f54f908 100644
107382--- a/net/core/sock.c
107383+++ b/net/core/sock.c
107384@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
107385 struct sk_buff_head *list = &sk->sk_receive_queue;
107386
107387 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
107388- atomic_inc(&sk->sk_drops);
107389+ atomic_inc_unchecked(&sk->sk_drops);
107390 trace_sock_rcvqueue_full(sk, skb);
107391 return -ENOMEM;
107392 }
107393@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
107394 return err;
107395
107396 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
107397- atomic_inc(&sk->sk_drops);
107398+ atomic_inc_unchecked(&sk->sk_drops);
107399 return -ENOBUFS;
107400 }
107401
107402@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
107403 skb_dst_force(skb);
107404
107405 spin_lock_irqsave(&list->lock, flags);
107406- skb->dropcount = atomic_read(&sk->sk_drops);
107407+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
107408 __skb_queue_tail(list, skb);
107409 spin_unlock_irqrestore(&list->lock, flags);
107410
107411@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
107412 skb->dev = NULL;
107413
107414 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
107415- atomic_inc(&sk->sk_drops);
107416+ atomic_inc_unchecked(&sk->sk_drops);
107417 goto discard_and_relse;
107418 }
107419 if (nested)
107420@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
107421 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
107422 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
107423 bh_unlock_sock(sk);
107424- atomic_inc(&sk->sk_drops);
107425+ atomic_inc_unchecked(&sk->sk_drops);
107426 goto discard_and_relse;
107427 }
107428
107429@@ -998,12 +998,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
107430 struct timeval tm;
107431 } v;
107432
107433- int lv = sizeof(int);
107434- int len;
107435+ unsigned int lv = sizeof(int);
107436+ unsigned int len;
107437
107438 if (get_user(len, optlen))
107439 return -EFAULT;
107440- if (len < 0)
107441+ if (len > INT_MAX)
107442 return -EINVAL;
107443
107444 memset(&v, 0, sizeof(v));
107445@@ -1155,11 +1155,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
107446
107447 case SO_PEERNAME:
107448 {
107449- char address[128];
107450+ char address[_K_SS_MAXSIZE];
107451
107452 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
107453 return -ENOTCONN;
107454- if (lv < len)
107455+ if (lv < len || sizeof address < len)
107456 return -EINVAL;
107457 if (copy_to_user(optval, address, len))
107458 return -EFAULT;
107459@@ -1240,7 +1240,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
107460
107461 if (len > lv)
107462 len = lv;
107463- if (copy_to_user(optval, &v, len))
107464+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
107465 return -EFAULT;
107466 lenout:
107467 if (put_user(len, optlen))
107468@@ -2375,7 +2375,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
107469 */
107470 smp_wmb();
107471 atomic_set(&sk->sk_refcnt, 1);
107472- atomic_set(&sk->sk_drops, 0);
107473+ atomic_set_unchecked(&sk->sk_drops, 0);
107474 }
107475 EXPORT_SYMBOL(sock_init_data);
107476
107477@@ -2503,6 +2503,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
107478 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
107479 int level, int type)
107480 {
107481+ struct sock_extended_err ee;
107482 struct sock_exterr_skb *serr;
107483 struct sk_buff *skb, *skb2;
107484 int copied, err;
107485@@ -2524,7 +2525,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
107486 sock_recv_timestamp(msg, sk, skb);
107487
107488 serr = SKB_EXT_ERR(skb);
107489- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
107490+ ee = serr->ee;
107491+ put_cmsg(msg, level, type, sizeof ee, &ee);
107492
107493 msg->msg_flags |= MSG_ERRQUEUE;
107494 err = copied;
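The sock_getsockopt() hunk changes len/lv from int to unsigned int and replaces the len < 0 test with len > INT_MAX: a negative user-supplied length becomes a huge unsigned value, so a single comparison rejects both negative and oversized inputs, and the added len > sizeof(v) guard before copy_to_user() can no longer be sidestepped by sign confusion. Reduced to its essentials:

#include <limits.h>
#include <stdio.h>

static int check_len(unsigned int len, size_t bufsize)
{
	if (len > INT_MAX)
		return -1;	/* was negative when viewed as int */
	if (len > bufsize)
		return -1;	/* would over-read the kernel buffer */
	return 0;
}

int main(void)
{
	printf("%d\n", check_len((unsigned int)-4, 64));	/* rejected */
	printf("%d\n", check_len(16, 64));			/* accepted */
	return 0;
}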
107495diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
107496index a4216a4..773e3d7 100644
107497--- a/net/core/sock_diag.c
107498+++ b/net/core/sock_diag.c
107499@@ -9,26 +9,33 @@
107500 #include <linux/inet_diag.h>
107501 #include <linux/sock_diag.h>
107502
107503-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
107504+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
107505 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
107506 static DEFINE_MUTEX(sock_diag_table_mutex);
107507
107508 int sock_diag_check_cookie(void *sk, __u32 *cookie)
107509 {
107510+#ifndef CONFIG_GRKERNSEC_HIDESYM
107511 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
107512 cookie[1] != INET_DIAG_NOCOOKIE) &&
107513 ((u32)(unsigned long)sk != cookie[0] ||
107514 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
107515 return -ESTALE;
107516 else
107517+#endif
107518 return 0;
107519 }
107520 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
107521
107522 void sock_diag_save_cookie(void *sk, __u32 *cookie)
107523 {
107524+#ifdef CONFIG_GRKERNSEC_HIDESYM
107525+ cookie[0] = 0;
107526+ cookie[1] = 0;
107527+#else
107528 cookie[0] = (u32)(unsigned long)sk;
107529 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
107530+#endif
107531 }
107532 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
107533
107534@@ -52,10 +59,9 @@ EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
107535 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
107536 struct sk_buff *skb, int attrtype)
107537 {
107538- struct sock_fprog_kern *fprog;
107539- struct sk_filter *filter;
107540 struct nlattr *attr;
107541- unsigned int flen;
107542+ struct sk_filter *filter;
107543+ unsigned int len;
107544 int err = 0;
107545
107546 if (!may_report_filterinfo) {
107547@@ -64,20 +70,24 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
107548 }
107549
107550 rcu_read_lock();
107551+
107552 filter = rcu_dereference(sk->sk_filter);
107553- if (!filter)
107554- goto out;
107555+ len = filter ? filter->len * sizeof(struct sock_filter) : 0;
107556
107557- fprog = filter->orig_prog;
107558- flen = sk_filter_proglen(fprog);
107559-
107560- attr = nla_reserve(skb, attrtype, flen);
107561+ attr = nla_reserve(skb, attrtype, len);
107562 if (attr == NULL) {
107563 err = -EMSGSIZE;
107564 goto out;
107565 }
107566
107567- memcpy(nla_data(attr), fprog->filter, flen);
107568+ if (filter) {
107569+ struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
107570+ int i;
107571+
107572+ for (i = 0; i < filter->len; i++, fb++)
107573+ sk_decode_filter(&filter->insns[i], fb);
107574+ }
107575+
107576 out:
107577 rcu_read_unlock();
107578 return err;
107579@@ -110,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
107580 mutex_lock(&sock_diag_table_mutex);
107581 if (sock_diag_handlers[hndl->family])
107582 err = -EBUSY;
107583- else
107584+ else {
107585+ pax_open_kernel();
107586 sock_diag_handlers[hndl->family] = hndl;
107587+ pax_close_kernel();
107588+ }
107589 mutex_unlock(&sock_diag_table_mutex);
107590
107591 return err;
107592@@ -127,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
107593
107594 mutex_lock(&sock_diag_table_mutex);
107595 BUG_ON(sock_diag_handlers[family] != hnld);
107596+ pax_open_kernel();
107597 sock_diag_handlers[family] = NULL;
107598+ pax_close_kernel();
107599 mutex_unlock(&sock_diag_table_mutex);
107600 }
107601 EXPORT_SYMBOL_GPL(sock_diag_unregister);
107602diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
107603index cf9cd13..8b56af3 100644
107604--- a/net/core/sysctl_net_core.c
107605+++ b/net/core/sysctl_net_core.c
107606@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
107607 {
107608 unsigned int orig_size, size;
107609 int ret, i;
107610- struct ctl_table tmp = {
107611+ ctl_table_no_const tmp = {
107612 .data = &size,
107613 .maxlen = sizeof(size),
107614 .mode = table->mode
107615@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
107616 void __user *buffer, size_t *lenp, loff_t *ppos)
107617 {
107618 char id[IFNAMSIZ];
107619- struct ctl_table tbl = {
107620+ ctl_table_no_const tbl = {
107621 .data = id,
107622 .maxlen = IFNAMSIZ,
107623 };
107624@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
107625
107626 static __net_init int sysctl_core_net_init(struct net *net)
107627 {
107628- struct ctl_table *tbl;
107629+ ctl_table_no_const *tbl = NULL;
107630
107631 net->core.sysctl_somaxconn = SOMAXCONN;
107632
107633- tbl = netns_core_table;
107634 if (!net_eq(net, &init_net)) {
107635- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
107636+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
107637 if (tbl == NULL)
107638 goto err_dup;
107639
107640@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
107641 if (net->user_ns != &init_user_ns) {
107642 tbl[0].procname = NULL;
107643 }
107644- }
107645-
107646- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
107647+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
107648+ } else
107649+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
107650 if (net->core.sysctl_hdr == NULL)
107651 goto err_reg;
107652
107653 return 0;
107654
107655 err_reg:
107656- if (tbl != netns_core_table)
107657- kfree(tbl);
107658+ kfree(tbl);
107659 err_dup:
107660 return -ENOMEM;
107661 }
107662@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
107663 kfree(tbl);
107664 }
107665
107666-static __net_initdata struct pernet_operations sysctl_core_ops = {
107667+static __net_initconst struct pernet_operations sysctl_core_ops = {
107668 .init = sysctl_core_net_init,
107669 .exit = sysctl_core_net_exit,
107670 };
107671diff --git a/net/core/timestamping.c b/net/core/timestamping.c
107672index 6521dfd..661b5a4 100644
107673--- a/net/core/timestamping.c
107674+++ b/net/core/timestamping.c
107675@@ -23,11 +23,16 @@
107676 #include <linux/skbuff.h>
107677 #include <linux/export.h>
107678
107679+static struct sock_filter ptp_filter[] = {
107680+ PTP_FILTER
107681+};
107682+
107683 static unsigned int classify(const struct sk_buff *skb)
107684 {
107685- if (likely(skb->dev && skb->dev->phydev &&
107686+ if (likely(skb->dev &&
107687+ skb->dev->phydev &&
107688 skb->dev->phydev->drv))
107689- return ptp_classify_raw(skb);
107690+ return sk_run_filter(skb, ptp_filter);
107691 else
107692 return PTP_CLASS_NONE;
107693 }
107694@@ -55,13 +60,11 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
107695 if (likely(phydev->drv->txtstamp)) {
107696 if (!atomic_inc_not_zero(&sk->sk_refcnt))
107697 return;
107698-
107699 clone = skb_clone(skb, GFP_ATOMIC);
107700 if (!clone) {
107701 sock_put(sk);
107702 return;
107703 }
107704-
107705 clone->sk = sk;
107706 phydev->drv->txtstamp(phydev, clone, type);
107707 }
107708@@ -86,15 +89,12 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
107709 }
107710
107711 *skb_hwtstamps(skb) = *hwtstamps;
107712-
107713 serr = SKB_EXT_ERR(skb);
107714 memset(serr, 0, sizeof(*serr));
107715 serr->ee.ee_errno = ENOMSG;
107716 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
107717 skb->sk = NULL;
107718-
107719 err = sock_queue_err_skb(sk, skb);
107720-
107721 sock_put(sk);
107722 if (err)
107723 kfree_skb(skb);
107724@@ -132,3 +132,8 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
107725 return false;
107726 }
107727 EXPORT_SYMBOL_GPL(skb_defer_rx_timestamp);
107728+
107729+void __init skb_timestamping_init(void)
107730+{
107731+ BUG_ON(sk_chk_filter(ptp_filter, ARRAY_SIZE(ptp_filter)));
107732+}
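This hunk reverts timestamping to a static classic-BPF program (PTP_FILTER expands to a sock_filter initializer) that skb_timestamping_init() validates once with sk_chk_filter(), which also rewrites it in place to the BPF_S_* encoding sk_run_filter() expects. The same build-validate-run flow is available from userspace through SO_ATTACH_FILTER; a self-contained example that accepts only ARP frames (needs CAP_NET_RAW):

#include <arpa/inet.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),	/* ldh [12]: ethertype */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0)
		perror("attach");
	else
		puts("classic BPF filter attached");
	return 0;
}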
107733diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
107734index ae011b4..d2d18bf 100644
107735--- a/net/decnet/af_decnet.c
107736+++ b/net/decnet/af_decnet.c
107737@@ -465,6 +465,7 @@ static struct proto dn_proto = {
107738 .sysctl_rmem = sysctl_decnet_rmem,
107739 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
107740 .obj_size = sizeof(struct dn_sock),
107741+ .slab_flags = SLAB_USERCOPY,
107742 };
107743
107744 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
107745diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
107746index 3b726f3..1af6368 100644
107747--- a/net/decnet/dn_dev.c
107748+++ b/net/decnet/dn_dev.c
107749@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
107750 .extra1 = &min_t3,
107751 .extra2 = &max_t3
107752 },
107753- {0}
107754+ { }
107755 },
107756 };
107757
107758diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
107759index 5325b54..a0d4d69 100644
107760--- a/net/decnet/sysctl_net_decnet.c
107761+++ b/net/decnet/sysctl_net_decnet.c
107762@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
107763
107764 if (len > *lenp) len = *lenp;
107765
107766- if (copy_to_user(buffer, addr, len))
107767+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
107768 return -EFAULT;
107769
107770 *lenp = len;
107771@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
107772
107773 if (len > *lenp) len = *lenp;
107774
107775- if (copy_to_user(buffer, devname, len))
107776+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
107777 return -EFAULT;
107778
107779 *lenp = len;
107780diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
107781index 6f1428c..9586b83 100644
107782--- a/net/ieee802154/reassembly.c
107783+++ b/net/ieee802154/reassembly.c
107784@@ -438,14 +438,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
107785
107786 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
107787 {
107788- struct ctl_table *table;
107789+ ctl_table_no_const *table = NULL;
107790 struct ctl_table_header *hdr;
107791 struct netns_ieee802154_lowpan *ieee802154_lowpan =
107792 net_ieee802154_lowpan(net);
107793
107794- table = lowpan_frags_ns_ctl_table;
107795 if (!net_eq(net, &init_net)) {
107796- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
107797+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
107798 GFP_KERNEL);
107799 if (table == NULL)
107800 goto err_alloc;
107801@@ -458,9 +457,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
107802 /* Don't export sysctls to unprivileged users */
107803 if (net->user_ns != &init_user_ns)
107804 table[0].procname = NULL;
107805- }
107806-
107807- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
107808+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
107809+ } else
107810+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
107811 if (hdr == NULL)
107812 goto err_reg;
107813
107814@@ -468,8 +467,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
107815 return 0;
107816
107817 err_reg:
107818- if (!net_eq(net, &init_net))
107819- kfree(table);
107820+ kfree(table);
107821 err_alloc:
107822 return -ENOMEM;
107823 }
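This is the same per-namespace sysctl rewrite seen in sysctl_net_core.c above: the init namespace registers the const template table directly, other namespaces get a kmemdup'd writable copy, and because kfree(NULL) is a no-op the error path can unconditionally free the (possibly NULL) duplicate. Skeleton of the control flow (register_tbl and the table contents are placeholders):

#include <stdlib.h>
#include <string.h>

struct ctl_table { const char *procname; };

static const struct ctl_table template[] = { { "6lowpanfrag" }, { 0 } };

static int register_tbl(const struct ctl_table *t) { return t != 0; }

static int ns_sysctl_register(int is_init_net)
{
	struct ctl_table *tbl = NULL;

	if (!is_init_net) {
		tbl = malloc(sizeof(template));	/* writable per-ns copy */
		if (!tbl)
			goto err_alloc;
		memcpy(tbl, template, sizeof(template));
		if (!register_tbl(tbl))
			goto err_reg;
	} else if (!register_tbl(template)) {	/* const, shared */
		goto err_reg;
	}
	return 0;

err_reg:
	free(tbl);	/* like kfree(NULL), a no-op when tbl is NULL */
err_alloc:
	return -1;
}

int main(void)
{
	return ns_sysctl_register(0) || ns_sysctl_register(1);
}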
107824diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
107825index e944937..368fe78 100644
107826--- a/net/ipv4/devinet.c
107827+++ b/net/ipv4/devinet.c
107828@@ -1540,7 +1540,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
107829 idx = 0;
107830 head = &net->dev_index_head[h];
107831 rcu_read_lock();
107832- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
107833+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
107834 net->dev_base_seq;
107835 hlist_for_each_entry_rcu(dev, head, index_hlist) {
107836 if (idx < s_idx)
107837@@ -1858,7 +1858,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
107838 idx = 0;
107839 head = &net->dev_index_head[h];
107840 rcu_read_lock();
107841- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
107842+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
107843 net->dev_base_seq;
107844 hlist_for_each_entry_rcu(dev, head, index_hlist) {
107845 if (idx < s_idx)
107846@@ -2093,7 +2093,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
107847 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
107848 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
107849
107850-static struct devinet_sysctl_table {
107851+static const struct devinet_sysctl_table {
107852 struct ctl_table_header *sysctl_header;
107853 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
107854 } devinet_sysctl = {
107855@@ -2215,7 +2215,7 @@ static __net_init int devinet_init_net(struct net *net)
107856 int err;
107857 struct ipv4_devconf *all, *dflt;
107858 #ifdef CONFIG_SYSCTL
107859- struct ctl_table *tbl = ctl_forward_entry;
107860+ ctl_table_no_const *tbl = NULL;
107861 struct ctl_table_header *forw_hdr;
107862 #endif
107863
107864@@ -2233,7 +2233,7 @@ static __net_init int devinet_init_net(struct net *net)
107865 goto err_alloc_dflt;
107866
107867 #ifdef CONFIG_SYSCTL
107868- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
107869+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
107870 if (tbl == NULL)
107871 goto err_alloc_ctl;
107872
107873@@ -2253,7 +2253,10 @@ static __net_init int devinet_init_net(struct net *net)
107874 goto err_reg_dflt;
107875
107876 err = -ENOMEM;
107877- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
107878+ if (!net_eq(net, &init_net))
107879+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
107880+ else
107881+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
107882 if (forw_hdr == NULL)
107883 goto err_reg_ctl;
107884 net->ipv4.forw_hdr = forw_hdr;
107885@@ -2269,8 +2272,7 @@ err_reg_ctl:
107886 err_reg_dflt:
107887 __devinet_sysctl_unregister(all);
107888 err_reg_all:
107889- if (tbl != ctl_forward_entry)
107890- kfree(tbl);
107891+ kfree(tbl);
107892 err_alloc_ctl:
107893 #endif
107894 if (dflt != &ipv4_devconf_dflt)
107895diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
107896index 255aa99..45c78f8 100644
107897--- a/net/ipv4/fib_frontend.c
107898+++ b/net/ipv4/fib_frontend.c
107899@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
107900 #ifdef CONFIG_IP_ROUTE_MULTIPATH
107901 fib_sync_up(dev);
107902 #endif
107903- atomic_inc(&net->ipv4.dev_addr_genid);
107904+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
107905 rt_cache_flush(dev_net(dev));
107906 break;
107907 case NETDEV_DOWN:
107908 fib_del_ifaddr(ifa, NULL);
107909- atomic_inc(&net->ipv4.dev_addr_genid);
107910+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
107911 if (ifa->ifa_dev->ifa_list == NULL) {
107912 /* Last address was deleted from this interface.
107913 * Disable IP.
107914@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
107915 #ifdef CONFIG_IP_ROUTE_MULTIPATH
107916 fib_sync_up(dev);
107917 #endif
107918- atomic_inc(&net->ipv4.dev_addr_genid);
107919+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
107920 rt_cache_flush(net);
107921 break;
107922 case NETDEV_DOWN:
107923diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
107924index b10cd43a..22327f9 100644
107925--- a/net/ipv4/fib_semantics.c
107926+++ b/net/ipv4/fib_semantics.c
107927@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
107928 nh->nh_saddr = inet_select_addr(nh->nh_dev,
107929 nh->nh_gw,
107930 nh->nh_parent->fib_scope);
107931- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
107932+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
107933
107934 return nh->nh_saddr;
107935 }
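
[editor's note] dev_addr_genid is a generation counter: it is bumped on address changes and only ever compared for (in)equality, so wrapping is harmless by design. The atomic_*_unchecked conversions here and below plausibly exist to exempt such counters from PAX_REFCOUNT's overflow trap, which is meant for reference counts. A userspace sketch of the same idea using a plain unsigned C11 atomic; atomic_unchecked_t itself is grsecurity-specific API and is not reproduced here:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint genid;	/* unsigned: wraparound is well defined */

static unsigned int snapshot(void) { return atomic_load(&genid); }
static void bump(void) { atomic_fetch_add(&genid, 1); }

int main(void)
{
	atomic_store(&genid, 0xffffffffu);	/* one step from wrapping */
	unsigned int before = snapshot();
	bump();					/* wraps to 0: fine for a genid */
	printf("stale cache? %s\n", snapshot() != before ? "yes" : "no");
	return 0;
}
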
107936diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
107937index 43116e8..e3e6159 100644
107938--- a/net/ipv4/inet_hashtables.c
107939+++ b/net/ipv4/inet_hashtables.c
107940@@ -18,6 +18,7 @@
107941 #include <linux/sched.h>
107942 #include <linux/slab.h>
107943 #include <linux/wait.h>
107944+#include <linux/security.h>
107945
107946 #include <net/inet_connection_sock.h>
107947 #include <net/inet_hashtables.h>
107948@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
107949 return inet_ehashfn(net, laddr, lport, faddr, fport);
107950 }
107951
107952+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
107953+
107954 /*
107955 * Allocate and initialize a new local port bind bucket.
107956 * The bindhash mutex for snum's hash chain must be held here.
107957@@ -554,6 +557,8 @@ ok:
107958 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
107959 spin_unlock(&head->lock);
107960
107961+ gr_update_task_in_ip_table(current, inet_sk(sk));
107962+
107963 if (tw) {
107964 inet_twsk_deschedule(tw, death_row);
107965 while (twrefcnt) {
107966diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
107967index bd5f592..e80e605 100644
107968--- a/net/ipv4/inetpeer.c
107969+++ b/net/ipv4/inetpeer.c
107970@@ -482,7 +482,7 @@ relookup:
107971 if (p) {
107972 p->daddr = *daddr;
107973 atomic_set(&p->refcnt, 1);
107974- atomic_set(&p->rid, 0);
107975+ atomic_set_unchecked(&p->rid, 0);
107976 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
107977 p->rate_tokens = 0;
107978 /* 60*HZ is arbitrary, but chosen enough high so that the first
107979diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
107980index ed32313..3762abe 100644
107981--- a/net/ipv4/ip_fragment.c
107982+++ b/net/ipv4/ip_fragment.c
107983@@ -284,7 +284,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
107984 return 0;
107985
107986 start = qp->rid;
107987- end = atomic_inc_return(&peer->rid);
107988+ end = atomic_inc_return_unchecked(&peer->rid);
107989 qp->rid = end;
107990
107991 rc = qp->q.fragments && (end - start) > max;
107992@@ -761,12 +761,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
107993
107994 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
107995 {
107996- struct ctl_table *table;
107997+ ctl_table_no_const *table = NULL;
107998 struct ctl_table_header *hdr;
107999
108000- table = ip4_frags_ns_ctl_table;
108001 if (!net_eq(net, &init_net)) {
108002- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
108003+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
108004 if (table == NULL)
108005 goto err_alloc;
108006
108007@@ -777,9 +776,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
108008 /* Don't export sysctls to unprivileged users */
108009 if (net->user_ns != &init_user_ns)
108010 table[0].procname = NULL;
108011- }
108012+ hdr = register_net_sysctl(net, "net/ipv4", table);
108013+ } else
108014+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
108015
108016- hdr = register_net_sysctl(net, "net/ipv4", table);
108017 if (hdr == NULL)
108018 goto err_reg;
108019
108020@@ -787,8 +787,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
108021 return 0;
108022
108023 err_reg:
108024- if (!net_eq(net, &init_net))
108025- kfree(table);
108026+ kfree(table);
108027 err_alloc:
108028 return -ENOMEM;
108029 }
108030diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
108031index 9b84254..c776611 100644
108032--- a/net/ipv4/ip_gre.c
108033+++ b/net/ipv4/ip_gre.c
108034@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
108035 module_param(log_ecn_error, bool, 0644);
108036 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
108037
108038-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
108039+static struct rtnl_link_ops ipgre_link_ops;
108040 static int ipgre_tunnel_init(struct net_device *dev);
108041
108042 static int ipgre_net_id __read_mostly;
108043@@ -733,7 +733,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
108044 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
108045 };
108046
108047-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
108048+static struct rtnl_link_ops ipgre_link_ops = {
108049 .kind = "gre",
108050 .maxtype = IFLA_GRE_MAX,
108051 .policy = ipgre_policy,
108052@@ -747,7 +747,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
108053 .fill_info = ipgre_fill_info,
108054 };
108055
108056-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
108057+static struct rtnl_link_ops ipgre_tap_ops = {
108058 .kind = "gretap",
108059 .maxtype = IFLA_GRE_MAX,
108060 .policy = ipgre_policy,
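
[editor's note] Dropping __read_mostly from ipgre_link_ops here (and from the other link_ops and protocol structs below) is most likely a prerequisite for grsecurity's constify plugin: __read_mostly pins the object into the writable .data..read_mostly section, while a const-qualified object needs a read-only section, and GCC refuses to mix the two in one section. A minimal repro of that compile-time conflict, assuming the usual definition of __read_mostly; this file deliberately does not compile:

#define __read_mostly __attribute__((section(".data..read_mostly")))

int writable_thing __read_mostly = 1;

/* gcc: error: 'const_thing' causes a section type conflict with 'writable_thing' */
const int const_thing __read_mostly = 2;
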
108061diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
108062index 64741b9..6f334a2 100644
108063--- a/net/ipv4/ip_sockglue.c
108064+++ b/net/ipv4/ip_sockglue.c
108065@@ -1188,7 +1188,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
108066 len = min_t(unsigned int, len, opt->optlen);
108067 if (put_user(len, optlen))
108068 return -EFAULT;
108069- if (copy_to_user(optval, opt->__data, len))
108070+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
108071+ copy_to_user(optval, opt->__data, len))
108072 return -EFAULT;
108073 return 0;
108074 }
108075@@ -1319,7 +1320,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
108076 if (sk->sk_type != SOCK_STREAM)
108077 return -ENOPROTOOPT;
108078
108079- msg.msg_control = optval;
108080+ msg.msg_control = (void __force_kernel *)optval;
108081 msg.msg_controllen = len;
108082 msg.msg_flags = flags;
108083
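
[editor's note] The do_ip_getsockopt() change bounds the reply length against the data area of the on-stack options buffer before copy_to_user(), closing a potential kernel stack infoleak when opt->optlen exceeds it. A userspace analogue of the check, with illustrative struct and function names:

#include <stdio.h>
#include <string.h>

struct opts { unsigned char hdr[8]; unsigned char data[40]; };

static int get_opts(void *out, size_t len)
{
	struct opts buf = { .data = "option-bytes" };

	if (len > sizeof(buf.data))	/* the added bound */
		return -1;		/* -EFAULT in the kernel */
	memcpy(out, buf.data, len);	/* stand-in for copy_to_user() */
	return 0;
}

int main(void)
{
	unsigned char out[64];
	printf("len 12: %d\n", get_opts(out, 12));	/* ok */
	printf("len 64: %d\n", get_opts(out, 64));	/* rejected */
	return 0;
}
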
108084diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
108085index b8960f3..0f025db 100644
108086--- a/net/ipv4/ip_vti.c
108087+++ b/net/ipv4/ip_vti.c
108088@@ -45,7 +45,7 @@
108089 #include <net/net_namespace.h>
108090 #include <net/netns/generic.h>
108091
108092-static struct rtnl_link_ops vti_link_ops __read_mostly;
108093+static struct rtnl_link_ops vti_link_ops;
108094
108095 static int vti_net_id __read_mostly;
108096 static int vti_tunnel_init(struct net_device *dev);
108097@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
108098 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
108099 };
108100
108101-static struct rtnl_link_ops vti_link_ops __read_mostly = {
108102+static struct rtnl_link_ops vti_link_ops = {
108103 .kind = "vti",
108104 .maxtype = IFLA_VTI_MAX,
108105 .policy = vti_policy,
108106diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
108107index b3e86ea..18ce98c 100644
108108--- a/net/ipv4/ipconfig.c
108109+++ b/net/ipv4/ipconfig.c
108110@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
108111
108112 mm_segment_t oldfs = get_fs();
108113 set_fs(get_ds());
108114- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
108115+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
108116 set_fs(oldfs);
108117 return res;
108118 }
108119@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
108120
108121 mm_segment_t oldfs = get_fs();
108122 set_fs(get_ds());
108123- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
108124+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
108125 set_fs(oldfs);
108126 return res;
108127 }
108128@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
108129
108130 mm_segment_t oldfs = get_fs();
108131 set_fs(get_ds());
108132- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
108133+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
108134 set_fs(oldfs);
108135 return res;
108136 }
108137diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
108138index 62eaa00..29b2dc2 100644
108139--- a/net/ipv4/ipip.c
108140+++ b/net/ipv4/ipip.c
108141@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
108142 static int ipip_net_id __read_mostly;
108143
108144 static int ipip_tunnel_init(struct net_device *dev);
108145-static struct rtnl_link_ops ipip_link_ops __read_mostly;
108146+static struct rtnl_link_ops ipip_link_ops;
108147
108148 static int ipip_err(struct sk_buff *skb, u32 info)
108149 {
108150@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
108151 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
108152 };
108153
108154-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
108155+static struct rtnl_link_ops ipip_link_ops = {
108156 .kind = "ipip",
108157 .maxtype = IFLA_IPTUN_MAX,
108158 .policy = ipip_policy,
108159diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
108160index f95b6f9..2ee2097 100644
108161--- a/net/ipv4/netfilter/arp_tables.c
108162+++ b/net/ipv4/netfilter/arp_tables.c
108163@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
108164 #endif
108165
108166 static int get_info(struct net *net, void __user *user,
108167- const int *len, int compat)
108168+ int len, int compat)
108169 {
108170 char name[XT_TABLE_MAXNAMELEN];
108171 struct xt_table *t;
108172 int ret;
108173
108174- if (*len != sizeof(struct arpt_getinfo)) {
108175- duprintf("length %u != %Zu\n", *len,
108176+ if (len != sizeof(struct arpt_getinfo)) {
108177+ duprintf("length %u != %Zu\n", len,
108178 sizeof(struct arpt_getinfo));
108179 return -EINVAL;
108180 }
108181@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
108182 info.size = private->size;
108183 strcpy(info.name, name);
108184
108185- if (copy_to_user(user, &info, *len) != 0)
108186+ if (copy_to_user(user, &info, len) != 0)
108187 ret = -EFAULT;
108188 else
108189 ret = 0;
108190@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
108191
108192 switch (cmd) {
108193 case ARPT_SO_GET_INFO:
108194- ret = get_info(sock_net(sk), user, len, 1);
108195+ ret = get_info(sock_net(sk), user, *len, 1);
108196 break;
108197 case ARPT_SO_GET_ENTRIES:
108198 ret = compat_get_entries(sock_net(sk), user, len);
108199@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
108200
108201 switch (cmd) {
108202 case ARPT_SO_GET_INFO:
108203- ret = get_info(sock_net(sk), user, len, 0);
108204+ ret = get_info(sock_net(sk), user, *len, 0);
108205 break;
108206
108207 case ARPT_SO_GET_ENTRIES:
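
[editor's note] Changing get_info() to take len by value moves the user-memory fetch to the two call sites, so the length is read once, then validated and used as the same snapshot. With the pointer form, a racing thread could in principle change *len between the size check and the copy_to_user() (a double fetch). The same conversion appears below in ip_tables.c and ip6_tables.c. A userspace sketch of the hazard and the fix; the volatile global stands in for attacker-writable memory:

#include <stdio.h>
#include <string.h>

volatile int shared_len;	/* attacker-writable in the real bug */

static int racy(char *dst, const char *src)
{
	if (shared_len > 16)		/* fetch #1: validation */
		return -1;
	memcpy(dst, src, shared_len);	/* fetch #2: may differ by now */
	return 0;
}

static int safe(char *dst, const char *src, int len)
{
	if (len > 16)			/* single snapshot, checked... */
		return -1;
	memcpy(dst, src, len);		/* ...and used unchanged */
	return 0;
}

int main(void)
{
	char dst[16], src[64] = "payload";
	shared_len = 7;
	printf("racy: %d, safe: %d\n", racy(dst, src), safe(dst, src, shared_len));
	return 0;
}
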
108208diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
108209index 99e810f..3711b81 100644
108210--- a/net/ipv4/netfilter/ip_tables.c
108211+++ b/net/ipv4/netfilter/ip_tables.c
108212@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
108213 #endif
108214
108215 static int get_info(struct net *net, void __user *user,
108216- const int *len, int compat)
108217+ int len, int compat)
108218 {
108219 char name[XT_TABLE_MAXNAMELEN];
108220 struct xt_table *t;
108221 int ret;
108222
108223- if (*len != sizeof(struct ipt_getinfo)) {
108224- duprintf("length %u != %zu\n", *len,
108225+ if (len != sizeof(struct ipt_getinfo)) {
108226+ duprintf("length %u != %zu\n", len,
108227 sizeof(struct ipt_getinfo));
108228 return -EINVAL;
108229 }
108230@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
108231 info.size = private->size;
108232 strcpy(info.name, name);
108233
108234- if (copy_to_user(user, &info, *len) != 0)
108235+ if (copy_to_user(user, &info, len) != 0)
108236 ret = -EFAULT;
108237 else
108238 ret = 0;
108239@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
108240
108241 switch (cmd) {
108242 case IPT_SO_GET_INFO:
108243- ret = get_info(sock_net(sk), user, len, 1);
108244+ ret = get_info(sock_net(sk), user, *len, 1);
108245 break;
108246 case IPT_SO_GET_ENTRIES:
108247 ret = compat_get_entries(sock_net(sk), user, len);
108248@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
108249
108250 switch (cmd) {
108251 case IPT_SO_GET_INFO:
108252- ret = get_info(sock_net(sk), user, len, 0);
108253+ ret = get_info(sock_net(sk), user, *len, 0);
108254 break;
108255
108256 case IPT_SO_GET_ENTRIES:
108257diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
108258index 2510c02..cfb34fa 100644
108259--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
108260+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
108261@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
108262 spin_lock_init(&cn->lock);
108263
108264 #ifdef CONFIG_PROC_FS
108265- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
108266+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
108267 if (!cn->procdir) {
108268 pr_err("Unable to proc dir entry\n");
108269 return -ENOMEM;
108270diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
108271index 044a0dd..3399751 100644
108272--- a/net/ipv4/ping.c
108273+++ b/net/ipv4/ping.c
108274@@ -59,7 +59,7 @@ struct ping_table {
108275 };
108276
108277 static struct ping_table ping_table;
108278-struct pingv6_ops pingv6_ops;
108279+struct pingv6_ops *pingv6_ops;
108280 EXPORT_SYMBOL_GPL(pingv6_ops);
108281
108282 static u16 ping_port_rover;
108283@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
108284 return -ENODEV;
108285 }
108286 }
108287- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
108288+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
108289 scoped);
108290 rcu_read_unlock();
108291
108292@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
108293 }
108294 #if IS_ENABLED(CONFIG_IPV6)
108295 } else if (skb->protocol == htons(ETH_P_IPV6)) {
108296- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
108297+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
108298 #endif
108299 }
108300
108301@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
108302 info, (u8 *)icmph);
108303 #if IS_ENABLED(CONFIG_IPV6)
108304 } else if (family == AF_INET6) {
108305- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
108306+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
108307 info, (u8 *)icmph);
108308 #endif
108309 }
108310@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
108311 return ip_recv_error(sk, msg, len, addr_len);
108312 #if IS_ENABLED(CONFIG_IPV6)
108313 } else if (family == AF_INET6) {
108314- return pingv6_ops.ipv6_recv_error(sk, msg, len,
108315+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
108316 addr_len);
108317 #endif
108318 }
108319@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
108320 }
108321
108322 if (inet6_sk(sk)->rxopt.all)
108323- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
108324+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
108325 if (skb->protocol == htons(ETH_P_IPV6) &&
108326 inet6_sk(sk)->rxopt.all)
108327- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
108328+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
108329 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
108330 ip_cmsg_recv(msg, skb);
108331 #endif
108332@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
108333 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
108334 0, sock_i_ino(sp),
108335 atomic_read(&sp->sk_refcnt), sp,
108336- atomic_read(&sp->sk_drops));
108337+ atomic_read_unchecked(&sp->sk_drops));
108338 }
108339
108340 static int ping_v4_seq_show(struct seq_file *seq, void *v)
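
[editor's note] Converting pingv6_ops from a writable global struct into a pointer means module load/unload swaps a single word between two complete ops tables instead of patching six function pointers in place (see the net/ipv6/ping.c hunks below); the tables themselves can then be protected read-only. A userspace analogue of the pointer-swap pattern, with illustrative names:

#include <stdio.h>

struct ops { int (*probe)(void); };

static int real_probe(void)  { return 42; }
static int dummy_probe(void) { return -1; }	/* module not loaded */

static const struct ops real_ops  = { .probe = real_probe };
static const struct ops dummy_ops = { .probe = dummy_probe };

static const struct ops *ops = &dummy_ops;	/* single writable word */

int main(void)
{
	printf("before init: %d\n", ops->probe());
	ops = &real_ops;	/* module init */
	printf("after init:  %d\n", ops->probe());
	ops = &dummy_ops;	/* module exit */
	printf("after exit:  %d\n", ops->probe());
	return 0;
}
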
108341diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
108342index 2c65160..213ecdf 100644
108343--- a/net/ipv4/raw.c
108344+++ b/net/ipv4/raw.c
108345@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
108346 int raw_rcv(struct sock *sk, struct sk_buff *skb)
108347 {
108348 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
108349- atomic_inc(&sk->sk_drops);
108350+ atomic_inc_unchecked(&sk->sk_drops);
108351 kfree_skb(skb);
108352 return NET_RX_DROP;
108353 }
108354@@ -748,16 +748,20 @@ static int raw_init(struct sock *sk)
108355
108356 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
108357 {
108358+ struct icmp_filter filter;
108359+
108360 if (optlen > sizeof(struct icmp_filter))
108361 optlen = sizeof(struct icmp_filter);
108362- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
108363+ if (copy_from_user(&filter, optval, optlen))
108364 return -EFAULT;
108365+ raw_sk(sk)->filter = filter;
108366 return 0;
108367 }
108368
108369 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
108370 {
108371 int len, ret = -EFAULT;
108372+ struct icmp_filter filter;
108373
108374 if (get_user(len, optlen))
108375 goto out;
108376@@ -767,8 +771,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
108377 if (len > sizeof(struct icmp_filter))
108378 len = sizeof(struct icmp_filter);
108379 ret = -EFAULT;
108380- if (put_user(len, optlen) ||
108381- copy_to_user(optval, &raw_sk(sk)->filter, len))
108382+ filter = raw_sk(sk)->filter;
108383+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
108384 goto out;
108385 ret = 0;
108386 out: return ret;
108387@@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
108388 0, 0L, 0,
108389 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
108390 0, sock_i_ino(sp),
108391- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
108392+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
108393 }
108394
108395 static int raw_seq_show(struct seq_file *seq, void *v)
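
[editor's note] raw_seticmpfilter() now bounces the user data through a fixed-size stack copy and commits it to the socket in one struct assignment. That keeps a user-controlled-length copy_from_user() from targeting the socket object directly (which hardening such as PAX_USERCOPY can then bound precisely against the local's size) and means a faulting partial copy leaves the installed filter untouched; rawv6 gets the same treatment below. A userspace sketch; fake_copy_from_user() is a stand-in for the real primitive:

#include <stdio.h>
#include <string.h>

struct icmp_filter { unsigned int data; };
static struct icmp_filter sock_filter;	/* long-lived state */

static int fake_copy_from_user(void *dst, const void *src, size_t n,
			       int fault)
{
	if (fault) {
		memcpy(dst, src, n / 2);	/* partial copy, then fault */
		return -1;
	}
	memcpy(dst, src, n);
	return 0;
}

static int set_filter(const void *user, size_t len, int fault)
{
	struct icmp_filter tmp;

	if (len > sizeof(tmp))
		len = sizeof(tmp);
	if (fake_copy_from_user(&tmp, user, len, fault))
		return -1;		/* sock_filter untouched */
	sock_filter = tmp;		/* commit only on full success */
	return 0;
}

int main(void)
{
	unsigned int user_val = 0xdeadbeef;
	set_filter(&user_val, sizeof(user_val), 1);
	printf("after faulting set: %#x\n", sock_filter.data);	/* still 0 */
	set_filter(&user_val, sizeof(user_val), 0);
	printf("after clean set:    %#x\n", sock_filter.data);
	return 0;
}
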
108396diff --git a/net/ipv4/route.c b/net/ipv4/route.c
108397index 1901998..a9a850a 100644
108398--- a/net/ipv4/route.c
108399+++ b/net/ipv4/route.c
108400@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
108401
108402 static int rt_cache_seq_open(struct inode *inode, struct file *file)
108403 {
108404- return seq_open(file, &rt_cache_seq_ops);
108405+ return seq_open_restrict(file, &rt_cache_seq_ops);
108406 }
108407
108408 static const struct file_operations rt_cache_seq_fops = {
108409@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
108410
108411 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
108412 {
108413- return seq_open(file, &rt_cpu_seq_ops);
108414+ return seq_open_restrict(file, &rt_cpu_seq_ops);
108415 }
108416
108417 static const struct file_operations rt_cpu_seq_fops = {
108418@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
108419
108420 static int rt_acct_proc_open(struct inode *inode, struct file *file)
108421 {
108422- return single_open(file, rt_acct_proc_show, NULL);
108423+ return single_open_restrict(file, rt_acct_proc_show, NULL);
108424 }
108425
108426 static const struct file_operations rt_acct_proc_fops = {
108427@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
108428
108429 #define IP_IDENTS_SZ 2048u
108430 struct ip_ident_bucket {
108431- atomic_t id;
108432+ atomic_unchecked_t id;
108433 u32 stamp32;
108434 };
108435
108436-static struct ip_ident_bucket *ip_idents __read_mostly;
108437+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
108438
108439 /* In order to protect privacy, we add a perturbation to identifiers
108440 * if one generator is seldom used. This makes hard for an attacker
108441@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
108442 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
108443 delta = prandom_u32_max(now - old);
108444
108445- return atomic_add_return(segs + delta, &bucket->id) - segs;
108446+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
108447 }
108448 EXPORT_SYMBOL(ip_idents_reserve);
108449
108450@@ -2625,34 +2625,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
108451 .maxlen = sizeof(int),
108452 .mode = 0200,
108453 .proc_handler = ipv4_sysctl_rtcache_flush,
108454+ .extra1 = &init_net,
108455 },
108456 { },
108457 };
108458
108459 static __net_init int sysctl_route_net_init(struct net *net)
108460 {
108461- struct ctl_table *tbl;
108462+ ctl_table_no_const *tbl = NULL;
108463
108464- tbl = ipv4_route_flush_table;
108465 if (!net_eq(net, &init_net)) {
108466- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
108467+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
108468 if (tbl == NULL)
108469 goto err_dup;
108470
108471 /* Don't export sysctls to unprivileged users */
108472 if (net->user_ns != &init_user_ns)
108473 tbl[0].procname = NULL;
108474- }
108475- tbl[0].extra1 = net;
108476+ tbl[0].extra1 = net;
108477+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
108478+ } else
108479+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
108480
108481- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
108482 if (net->ipv4.route_hdr == NULL)
108483 goto err_reg;
108484 return 0;
108485
108486 err_reg:
108487- if (tbl != ipv4_route_flush_table)
108488- kfree(tbl);
108489+ kfree(tbl);
108490 err_dup:
108491 return -ENOMEM;
108492 }
108493@@ -2675,8 +2675,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
108494
108495 static __net_init int rt_genid_init(struct net *net)
108496 {
108497- atomic_set(&net->ipv4.rt_genid, 0);
108498- atomic_set(&net->fnhe_genid, 0);
108499+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
108500+ atomic_set_unchecked(&net->fnhe_genid, 0);
108501 get_random_bytes(&net->ipv4.dev_addr_genid,
108502 sizeof(net->ipv4.dev_addr_genid));
108503 return 0;
108504@@ -2719,11 +2719,7 @@ int __init ip_rt_init(void)
108505 {
108506 int rc = 0;
108507
108508- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
108509- if (!ip_idents)
108510- panic("IP: failed to allocate ip_idents\n");
108511-
108512- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
108513+ prandom_bytes(ip_idents, sizeof(ip_idents));
108514
108515 #ifdef CONFIG_IP_ROUTE_CLASSID
108516 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
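
[editor's note] ip_idents is a fixed-size table the stack cannot run without, so the patch replaces the boot-time kmalloc-or-panic with a static array; one visible side benefit in the hunk is that sizeof(ip_idents) now measures the whole table, replacing the count-times-element-size expression the kmalloc version needed. A compilable sketch of that sizeof difference:

#include <stdio.h>

#define IP_IDENTS_SZ 2048u
struct ip_ident_bucket { unsigned int id; unsigned int stamp32; };

static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ];

int main(void)
{
	/* whole-array size, no multiplication to get wrong */
	printf("%zu bytes for %u buckets\n", sizeof(ip_idents), IP_IDENTS_SZ);
	return 0;
}
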
108517diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
108518index 79a007c..5023029 100644
108519--- a/net/ipv4/sysctl_net_ipv4.c
108520+++ b/net/ipv4/sysctl_net_ipv4.c
108521@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
108522 container_of(table->data, struct net, ipv4.ip_local_ports.range);
108523 int ret;
108524 int range[2];
108525- struct ctl_table tmp = {
108526+ ctl_table_no_const tmp = {
108527 .data = &range,
108528 .maxlen = sizeof(range),
108529 .mode = table->mode,
108530@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
108531 int ret;
108532 gid_t urange[2];
108533 kgid_t low, high;
108534- struct ctl_table tmp = {
108535+ ctl_table_no_const tmp = {
108536 .data = &urange,
108537 .maxlen = sizeof(urange),
108538 .mode = table->mode,
108539@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
108540 void __user *buffer, size_t *lenp, loff_t *ppos)
108541 {
108542 char val[TCP_CA_NAME_MAX];
108543- struct ctl_table tbl = {
108544+ ctl_table_no_const tbl = {
108545 .data = val,
108546 .maxlen = TCP_CA_NAME_MAX,
108547 };
108548@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
108549 void __user *buffer, size_t *lenp,
108550 loff_t *ppos)
108551 {
108552- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
108553+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
108554 int ret;
108555
108556 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
108557@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
108558 void __user *buffer, size_t *lenp,
108559 loff_t *ppos)
108560 {
108561- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
108562+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
108563 int ret;
108564
108565 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
108566@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
108567 void __user *buffer, size_t *lenp,
108568 loff_t *ppos)
108569 {
108570- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
108571+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
108572 struct tcp_fastopen_context *ctxt;
108573 int ret;
108574 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
108575@@ -857,13 +857,12 @@ static struct ctl_table ipv4_net_table[] = {
108576
108577 static __net_init int ipv4_sysctl_init_net(struct net *net)
108578 {
108579- struct ctl_table *table;
108580+ ctl_table_no_const *table = NULL;
108581
108582- table = ipv4_net_table;
108583 if (!net_eq(net, &init_net)) {
108584 int i;
108585
108586- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
108587+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
108588 if (table == NULL)
108589 goto err_alloc;
108590
108591@@ -872,7 +871,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
108592 table[i].data += (void *)net - (void *)&init_net;
108593 }
108594
108595- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
108596+ if (!net_eq(net, &init_net))
108597+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
108598+ else
108599+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
108600 if (net->ipv4.ipv4_hdr == NULL)
108601 goto err_reg;
108602
108603diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
108604index 40639c2..dfc86b2 100644
108605--- a/net/ipv4/tcp_input.c
108606+++ b/net/ipv4/tcp_input.c
108607@@ -754,7 +754,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
108608 * without any lock. We want to make sure compiler wont store
108609 * intermediate values in this location.
108610 */
108611- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
108612+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
108613 sk->sk_max_pacing_rate);
108614 }
108615
108616@@ -4478,7 +4478,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
108617 * simplifies code)
108618 */
108619 static void
108620-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
108621+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
108622 struct sk_buff *head, struct sk_buff *tail,
108623 u32 start, u32 end)
108624 {
108625@@ -5536,6 +5536,7 @@ discard:
108626 tcp_paws_reject(&tp->rx_opt, 0))
108627 goto discard_and_undo;
108628
108629+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
108630 if (th->syn) {
108631 /* We see SYN without ACK. It is attempt of
108632 * simultaneous connect with crossed SYNs.
108633@@ -5586,6 +5587,7 @@ discard:
108634 goto discard;
108635 #endif
108636 }
108637+#endif
108638 /* "fifth, if neither of the SYN or RST bits is set then
108639 * drop the segment and return."
108640 */
108641@@ -5632,7 +5634,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
108642 goto discard;
108643
108644 if (th->syn) {
108645- if (th->fin)
108646+ if (th->fin || th->urg || th->psh)
108647 goto discard;
108648 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
108649 return 1;
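
[editor's note] The tcp_rcv_state_process() hunk extends the existing SYN+FIN drop to also reject SYN segments carrying URG or PSH, flag combinations associated with scanners and evasion tools rather than legitimate opens. A minimal sketch of the check; the bitfield struct below is illustrative, not the kernel's tcphdr layout:

#include <stdio.h>

struct flags { unsigned syn:1, fin:1, urg:1, psh:1; };

static int acceptable_open(const struct flags *th)
{
	if (!th->syn)
		return 0;
	if (th->fin || th->urg || th->psh)	/* the hardened check */
		return 0;
	return 1;
}

int main(void)
{
	struct flags syn = { .syn = 1 };
	struct flags synfin = { .syn = 1, .fin = 1 };
	printf("SYN: %d, SYN+FIN: %d\n",
	       acceptable_open(&syn), acceptable_open(&synfin));
	return 0;
}
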
108650diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
108651index 77cccda..10122c4 100644
108652--- a/net/ipv4/tcp_ipv4.c
108653+++ b/net/ipv4/tcp_ipv4.c
108654@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
108655 EXPORT_SYMBOL(sysctl_tcp_low_latency);
108656
108657
108658+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108659+extern int grsec_enable_blackhole;
108660+#endif
108661+
108662 #ifdef CONFIG_TCP_MD5SIG
108663 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
108664 __be32 daddr, __be32 saddr, const struct tcphdr *th);
108665@@ -1591,6 +1595,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
108666 return 0;
108667
108668 reset:
108669+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108670+ if (!grsec_enable_blackhole)
108671+#endif
108672 tcp_v4_send_reset(rsk, skb);
108673 discard:
108674 kfree_skb(skb);
108675@@ -1737,12 +1744,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
108676 TCP_SKB_CB(skb)->sacked = 0;
108677
108678 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
108679- if (!sk)
108680+ if (!sk) {
108681+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108682+ ret = 1;
108683+#endif
108684 goto no_tcp_socket;
108685-
108686+ }
108687 process:
108688- if (sk->sk_state == TCP_TIME_WAIT)
108689+ if (sk->sk_state == TCP_TIME_WAIT) {
108690+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108691+ ret = 2;
108692+#endif
108693 goto do_time_wait;
108694+ }
108695
108696 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
108697 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
108698@@ -1796,6 +1810,10 @@ csum_error:
108699 bad_packet:
108700 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
108701 } else {
108702+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108703+ if (!grsec_enable_blackhole || (ret == 1 &&
108704+ (skb->dev->flags & IFF_LOOPBACK)))
108705+#endif
108706 tcp_v4_send_reset(NULL, skb);
108707 }
108708
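
[editor's note] Threaded through tcp_v4_rcv(), the blackhole logic uses ret to remember why a packet reached the error path (1: no socket matched, 2: time-wait) and then suppresses the RST unless blackholing is off or the no-socket packet arrived on loopback, so remote scans of closed ports see silence while local tooling still gets resets. A condensed, hedged reconstruction of that decision, not kernel code:

#include <stdio.h>

static int blackhole_enabled = 1;	/* grsec_enable_blackhole */

static int should_send_reset(int no_socket_match, int on_loopback)
{
	if (!blackhole_enabled)
		return 1;
	if (no_socket_match && on_loopback)
		return 1;	/* keep loopback debuggable */
	return 0;
}

int main(void)
{
	printf("remote closed port: %d\n", should_send_reset(1, 0));
	printf("local closed port:  %d\n", should_send_reset(1, 1));
	return 0;
}
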
108709diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
108710index e68e0d4..0334263 100644
108711--- a/net/ipv4/tcp_minisocks.c
108712+++ b/net/ipv4/tcp_minisocks.c
108713@@ -27,6 +27,10 @@
108714 #include <net/inet_common.h>
108715 #include <net/xfrm.h>
108716
108717+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108718+extern int grsec_enable_blackhole;
108719+#endif
108720+
108721 int sysctl_tcp_syncookies __read_mostly = 1;
108722 EXPORT_SYMBOL(sysctl_tcp_syncookies);
108723
108724@@ -740,7 +744,10 @@ embryonic_reset:
108725 * avoid becoming vulnerable to outside attack aiming at
108726 * resetting legit local connections.
108727 */
108728- req->rsk_ops->send_reset(sk, skb);
108729+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108730+ if (!grsec_enable_blackhole)
108731+#endif
108732+ req->rsk_ops->send_reset(sk, skb);
108733 } else if (fastopen) { /* received a valid RST pkt */
108734 reqsk_fastopen_remove(sk, req, true);
108735 tcp_reset(sk);
108736diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
108737index 3b66610..bfbe23a 100644
108738--- a/net/ipv4/tcp_probe.c
108739+++ b/net/ipv4/tcp_probe.c
108740@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
108741 if (cnt + width >= len)
108742 break;
108743
108744- if (copy_to_user(buf + cnt, tbuf, width))
108745+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
108746 return -EFAULT;
108747 cnt += width;
108748 }
108749diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
108750index 286227a..c495a76 100644
108751--- a/net/ipv4/tcp_timer.c
108752+++ b/net/ipv4/tcp_timer.c
108753@@ -22,6 +22,10 @@
108754 #include <linux/gfp.h>
108755 #include <net/tcp.h>
108756
108757+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108758+extern int grsec_lastack_retries;
108759+#endif
108760+
108761 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
108762 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
108763 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
108764@@ -192,6 +196,13 @@ static int tcp_write_timeout(struct sock *sk)
108765 }
108766 }
108767
108768+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108769+ if ((sk->sk_state == TCP_LAST_ACK) &&
108770+ (grsec_lastack_retries > 0) &&
108771+ (grsec_lastack_retries < retry_until))
108772+ retry_until = grsec_lastack_retries;
108773+#endif
108774+
108775 if (retransmits_timed_out(sk, retry_until,
108776 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
108777 /* Has it gone just too far? */
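
[editor's note] The tcp_write_timeout() addition clamps the retransmission budget for sockets in LAST_ACK when the grsec_lastack_retries sysctl is set and smaller than the normal limit, so unresponsive peers pin kernel resources for less time. As a sketch:

#include <stdio.h>

enum { TCP_LAST_ACK = 9 };
static int grsec_lastack_retries = 4;	/* sysctl-style tunable */

static int retry_budget(int state, int retry_until)
{
	if (state == TCP_LAST_ACK &&
	    grsec_lastack_retries > 0 &&
	    grsec_lastack_retries < retry_until)
		retry_until = grsec_lastack_retries;
	return retry_until;
}

int main(void)
{
	printf("established: %d\n", retry_budget(1, 15));
	printf("last-ack:    %d\n", retry_budget(TCP_LAST_ACK, 15));
	return 0;
}
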
108778diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
108779index 7d5a866..4874211 100644
108780--- a/net/ipv4/udp.c
108781+++ b/net/ipv4/udp.c
108782@@ -87,6 +87,7 @@
108783 #include <linux/types.h>
108784 #include <linux/fcntl.h>
108785 #include <linux/module.h>
108786+#include <linux/security.h>
108787 #include <linux/socket.h>
108788 #include <linux/sockios.h>
108789 #include <linux/igmp.h>
108790@@ -113,6 +114,10 @@
108791 #include <net/busy_poll.h>
108792 #include "udp_impl.h"
108793
108794+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108795+extern int grsec_enable_blackhole;
108796+#endif
108797+
108798 struct udp_table udp_table __read_mostly;
108799 EXPORT_SYMBOL(udp_table);
108800
108801@@ -615,6 +620,9 @@ found:
108802 return s;
108803 }
108804
108805+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
108806+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
108807+
108808 /*
108809 * This routine is called by the ICMP module when it gets some
108810 * sort of error condition. If err < 0 then the socket should
108811@@ -952,9 +960,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
108812 dport = usin->sin_port;
108813 if (dport == 0)
108814 return -EINVAL;
108815+
108816+ err = gr_search_udp_sendmsg(sk, usin);
108817+ if (err)
108818+ return err;
108819 } else {
108820 if (sk->sk_state != TCP_ESTABLISHED)
108821 return -EDESTADDRREQ;
108822+
108823+ err = gr_search_udp_sendmsg(sk, NULL);
108824+ if (err)
108825+ return err;
108826+
108827 daddr = inet->inet_daddr;
108828 dport = inet->inet_dport;
108829 /* Open fast path for connected socket.
108830@@ -1202,7 +1219,7 @@ static unsigned int first_packet_length(struct sock *sk)
108831 IS_UDPLITE(sk));
108832 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
108833 IS_UDPLITE(sk));
108834- atomic_inc(&sk->sk_drops);
108835+ atomic_inc_unchecked(&sk->sk_drops);
108836 __skb_unlink(skb, rcvq);
108837 __skb_queue_tail(&list_kill, skb);
108838 }
108839@@ -1282,6 +1299,10 @@ try_again:
108840 if (!skb)
108841 goto out;
108842
108843+ err = gr_search_udp_recvmsg(sk, skb);
108844+ if (err)
108845+ goto out_free;
108846+
108847 ulen = skb->len - sizeof(struct udphdr);
108848 copied = len;
108849 if (copied > ulen)
108850@@ -1315,7 +1336,7 @@ try_again:
108851 if (unlikely(err)) {
108852 trace_kfree_skb(skb, udp_recvmsg);
108853 if (!peeked) {
108854- atomic_inc(&sk->sk_drops);
108855+ atomic_inc_unchecked(&sk->sk_drops);
108856 UDP_INC_STATS_USER(sock_net(sk),
108857 UDP_MIB_INERRORS, is_udplite);
108858 }
108859@@ -1612,7 +1633,7 @@ csum_error:
108860 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
108861 drop:
108862 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
108863- atomic_inc(&sk->sk_drops);
108864+ atomic_inc_unchecked(&sk->sk_drops);
108865 kfree_skb(skb);
108866 return -1;
108867 }
108868@@ -1631,7 +1652,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
108869 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
108870
108871 if (!skb1) {
108872- atomic_inc(&sk->sk_drops);
108873+ atomic_inc_unchecked(&sk->sk_drops);
108874 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
108875 IS_UDPLITE(sk));
108876 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
108877@@ -1817,6 +1838,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
108878 goto csum_error;
108879
108880 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
108881+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
108882+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
108883+#endif
108884 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
108885
108886 /*
108887@@ -2403,7 +2427,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
108888 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
108889 0, sock_i_ino(sp),
108890 atomic_read(&sp->sk_refcnt), sp,
108891- atomic_read(&sp->sk_drops));
108892+ atomic_read_unchecked(&sp->sk_drops));
108893 }
108894
108895 int udp4_seq_show(struct seq_file *seq, void *v)
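
[editor's note] gr_search_udp_sendmsg()/gr_search_udp_recvmsg() insert a policy decision ahead of the UDP fast paths; per the hunks, a NULL address means the connected-socket case and any nonzero return aborts the call with that error. The sketch below mirrors only that calling convention; the policy body is a placeholder, since the real checks live in grsecurity's RBAC code, which this excerpt does not show:

#include <stdio.h>
#include <errno.h>

struct sockaddr_in_lite { unsigned short port; unsigned int addr; };

static int policy_allows(const struct sockaddr_in_lite *dst)
{
	(void)dst;		/* placeholder: consult ACLs here */
	return 1;
}

static int udp_send(const struct sockaddr_in_lite *usin)
{
	/* usin == NULL means "connected socket", mirroring the hunk */
	if (!policy_allows(usin))
		return -EPERM;
	return 0;		/* proceed with the send */
}

int main(void)
{
	struct sockaddr_in_lite dst = { .port = 53, .addr = 0x08080808 };
	printf("explicit dst: %d, connected: %d\n",
	       udp_send(&dst), udp_send(NULL));
	return 0;
}
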
108896diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
108897index 6156f68..d6ab46d 100644
108898--- a/net/ipv4/xfrm4_policy.c
108899+++ b/net/ipv4/xfrm4_policy.c
108900@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
108901 fl4->flowi4_tos = iph->tos;
108902 }
108903
108904-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
108905+static int xfrm4_garbage_collect(struct dst_ops *ops)
108906 {
108907 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
108908
108909- xfrm4_policy_afinfo.garbage_collect(net);
108910+ xfrm_garbage_collect_deferred(net);
108911 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
108912 }
108913
108914@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
108915
108916 static int __net_init xfrm4_net_init(struct net *net)
108917 {
108918- struct ctl_table *table;
108919+ ctl_table_no_const *table = NULL;
108920 struct ctl_table_header *hdr;
108921
108922- table = xfrm4_policy_table;
108923 if (!net_eq(net, &init_net)) {
108924- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
108925+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
108926 if (!table)
108927 goto err_alloc;
108928
108929 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
108930- }
108931-
108932- hdr = register_net_sysctl(net, "net/ipv4", table);
108933+ hdr = register_net_sysctl(net, "net/ipv4", table);
108934+ } else
108935+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
108936 if (!hdr)
108937 goto err_reg;
108938
108939@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
108940 return 0;
108941
108942 err_reg:
108943- if (!net_eq(net, &init_net))
108944- kfree(table);
108945+ kfree(table);
108946 err_alloc:
108947 return -ENOMEM;
108948 }
108949diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
108950index 5667b30..9b1b876 100644
108951--- a/net/ipv6/addrconf.c
108952+++ b/net/ipv6/addrconf.c
108953@@ -593,7 +593,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
108954 idx = 0;
108955 head = &net->dev_index_head[h];
108956 rcu_read_lock();
108957- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
108958+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
108959 net->dev_base_seq;
108960 hlist_for_each_entry_rcu(dev, head, index_hlist) {
108961 if (idx < s_idx)
108962@@ -2390,7 +2390,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
108963 p.iph.ihl = 5;
108964 p.iph.protocol = IPPROTO_IPV6;
108965 p.iph.ttl = 64;
108966- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
108967+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
108968
108969 if (ops->ndo_do_ioctl) {
108970 mm_segment_t oldfs = get_fs();
108971@@ -3516,16 +3516,23 @@ static const struct file_operations if6_fops = {
108972 .release = seq_release_net,
108973 };
108974
108975+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
108976+extern void unregister_ipv6_seq_ops_addr(void);
108977+
108978 static int __net_init if6_proc_net_init(struct net *net)
108979 {
108980- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
108981+ register_ipv6_seq_ops_addr(&if6_seq_ops);
108982+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
108983+ unregister_ipv6_seq_ops_addr();
108984 return -ENOMEM;
108985+ }
108986 return 0;
108987 }
108988
108989 static void __net_exit if6_proc_net_exit(struct net *net)
108990 {
108991 remove_proc_entry("if_inet6", net->proc_net);
108992+ unregister_ipv6_seq_ops_addr();
108993 }
108994
108995 static struct pernet_operations if6_proc_net_ops = {
108996@@ -4141,7 +4148,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
108997 s_ip_idx = ip_idx = cb->args[2];
108998
108999 rcu_read_lock();
109000- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
109001+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
109002 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
109003 idx = 0;
109004 head = &net->dev_index_head[h];
109005@@ -4753,7 +4760,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
109006 dst_free(&ifp->rt->dst);
109007 break;
109008 }
109009- atomic_inc(&net->ipv6.dev_addr_genid);
109010+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
109011 rt_genid_bump_ipv6(net);
109012 }
109013
109014@@ -4774,7 +4781,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
109015 int *valp = ctl->data;
109016 int val = *valp;
109017 loff_t pos = *ppos;
109018- struct ctl_table lctl;
109019+ ctl_table_no_const lctl;
109020 int ret;
109021
109022 /*
109023@@ -4859,7 +4866,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
109024 int *valp = ctl->data;
109025 int val = *valp;
109026 loff_t pos = *ppos;
109027- struct ctl_table lctl;
109028+ ctl_table_no_const lctl;
109029 int ret;
109030
109031 /*
109032diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
109033index 7cb4392..dc96d28 100644
109034--- a/net/ipv6/af_inet6.c
109035+++ b/net/ipv6/af_inet6.c
109036@@ -765,7 +765,7 @@ static int __net_init inet6_net_init(struct net *net)
109037 net->ipv6.sysctl.bindv6only = 0;
109038 net->ipv6.sysctl.icmpv6_time = 1*HZ;
109039 net->ipv6.sysctl.flowlabel_consistency = 1;
109040- atomic_set(&net->ipv6.rt_genid, 0);
109041+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
109042
109043 err = ipv6_init_mibs(net);
109044 if (err)
109045diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
109046index c3bf2d2..1f00573 100644
109047--- a/net/ipv6/datagram.c
109048+++ b/net/ipv6/datagram.c
109049@@ -938,5 +938,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
109050 0,
109051 sock_i_ino(sp),
109052 atomic_read(&sp->sk_refcnt), sp,
109053- atomic_read(&sp->sk_drops));
109054+ atomic_read_unchecked(&sp->sk_drops));
109055 }
109056diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
109057index f6c84a6..9f2084e 100644
109058--- a/net/ipv6/icmp.c
109059+++ b/net/ipv6/icmp.c
109060@@ -990,7 +990,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
109061
109062 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
109063 {
109064- struct ctl_table *table;
109065+ ctl_table_no_const *table;
109066
109067 table = kmemdup(ipv6_icmp_table_template,
109068 sizeof(ipv6_icmp_table_template),
109069diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
109070index 3873181..220ad3f 100644
109071--- a/net/ipv6/ip6_gre.c
109072+++ b/net/ipv6/ip6_gre.c
109073@@ -71,8 +71,8 @@ struct ip6gre_net {
109074 struct net_device *fb_tunnel_dev;
109075 };
109076
109077-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
109078-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
109079+static struct rtnl_link_ops ip6gre_link_ops;
109080+static struct rtnl_link_ops ip6gre_tap_ops;
109081 static int ip6gre_tunnel_init(struct net_device *dev);
109082 static void ip6gre_tunnel_setup(struct net_device *dev);
109083 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
109084@@ -1280,7 +1280,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
109085 }
109086
109087
109088-static struct inet6_protocol ip6gre_protocol __read_mostly = {
109089+static struct inet6_protocol ip6gre_protocol = {
109090 .handler = ip6gre_rcv,
109091 .err_handler = ip6gre_err,
109092 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
109093@@ -1638,7 +1638,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
109094 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
109095 };
109096
109097-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
109098+static struct rtnl_link_ops ip6gre_link_ops = {
109099 .kind = "ip6gre",
109100 .maxtype = IFLA_GRE_MAX,
109101 .policy = ip6gre_policy,
109102@@ -1652,7 +1652,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
109103 .fill_info = ip6gre_fill_info,
109104 };
109105
109106-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
109107+static struct rtnl_link_ops ip6gre_tap_ops = {
109108 .kind = "ip6gretap",
109109 .maxtype = IFLA_GRE_MAX,
109110 .policy = ip6gre_policy,
109111diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
109112index afa0824..04ba530 100644
109113--- a/net/ipv6/ip6_tunnel.c
109114+++ b/net/ipv6/ip6_tunnel.c
109115@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
109116
109117 static int ip6_tnl_dev_init(struct net_device *dev);
109118 static void ip6_tnl_dev_setup(struct net_device *dev);
109119-static struct rtnl_link_ops ip6_link_ops __read_mostly;
109120+static struct rtnl_link_ops ip6_link_ops;
109121
109122 static int ip6_tnl_net_id __read_mostly;
109123 struct ip6_tnl_net {
109124@@ -1708,7 +1708,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
109125 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
109126 };
109127
109128-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
109129+static struct rtnl_link_ops ip6_link_ops = {
109130 .kind = "ip6tnl",
109131 .maxtype = IFLA_IPTUN_MAX,
109132 .policy = ip6_tnl_policy,
109133diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
109134index 9aaa6bb..5c13e57 100644
109135--- a/net/ipv6/ip6_vti.c
109136+++ b/net/ipv6/ip6_vti.c
109137@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
109138
109139 static int vti6_dev_init(struct net_device *dev);
109140 static void vti6_dev_setup(struct net_device *dev);
109141-static struct rtnl_link_ops vti6_link_ops __read_mostly;
109142+static struct rtnl_link_ops vti6_link_ops;
109143
109144 static int vti6_net_id __read_mostly;
109145 struct vti6_net {
109146@@ -977,7 +977,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
109147 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
109148 };
109149
109150-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
109151+static struct rtnl_link_ops vti6_link_ops = {
109152 .kind = "vti6",
109153 .maxtype = IFLA_VTI_MAX,
109154 .policy = vti6_policy,
109155diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
109156index edb58af..78de133 100644
109157--- a/net/ipv6/ipv6_sockglue.c
109158+++ b/net/ipv6/ipv6_sockglue.c
109159@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
109160 if (sk->sk_type != SOCK_STREAM)
109161 return -ENOPROTOOPT;
109162
109163- msg.msg_control = optval;
109164+ msg.msg_control = (void __force_kernel *)optval;
109165 msg.msg_controllen = len;
109166 msg.msg_flags = flags;
109167
109168diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
109169index e080fbb..412b3cf 100644
109170--- a/net/ipv6/netfilter/ip6_tables.c
109171+++ b/net/ipv6/netfilter/ip6_tables.c
109172@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
109173 #endif
109174
109175 static int get_info(struct net *net, void __user *user,
109176- const int *len, int compat)
109177+ int len, int compat)
109178 {
109179 char name[XT_TABLE_MAXNAMELEN];
109180 struct xt_table *t;
109181 int ret;
109182
109183- if (*len != sizeof(struct ip6t_getinfo)) {
109184- duprintf("length %u != %zu\n", *len,
109185+ if (len != sizeof(struct ip6t_getinfo)) {
109186+ duprintf("length %u != %zu\n", len,
109187 sizeof(struct ip6t_getinfo));
109188 return -EINVAL;
109189 }
109190@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
109191 info.size = private->size;
109192 strcpy(info.name, name);
109193
109194- if (copy_to_user(user, &info, *len) != 0)
109195+ if (copy_to_user(user, &info, len) != 0)
109196 ret = -EFAULT;
109197 else
109198 ret = 0;
109199@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109200
109201 switch (cmd) {
109202 case IP6T_SO_GET_INFO:
109203- ret = get_info(sock_net(sk), user, len, 1);
109204+ ret = get_info(sock_net(sk), user, *len, 1);
109205 break;
109206 case IP6T_SO_GET_ENTRIES:
109207 ret = compat_get_entries(sock_net(sk), user, len);
109208@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109209
109210 switch (cmd) {
109211 case IP6T_SO_GET_INFO:
109212- ret = get_info(sock_net(sk), user, len, 0);
109213+ ret = get_info(sock_net(sk), user, *len, 0);
109214 break;
109215
109216 case IP6T_SO_GET_ENTRIES:
109217diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
109218index 0d5279f..89d9f6f 100644
109219--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
109220+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
109221@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
109222
109223 static int nf_ct_frag6_sysctl_register(struct net *net)
109224 {
109225- struct ctl_table *table;
109226+ ctl_table_no_const *table = NULL;
109227 struct ctl_table_header *hdr;
109228
109229- table = nf_ct_frag6_sysctl_table;
109230 if (!net_eq(net, &init_net)) {
109231- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
109232+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
109233 GFP_KERNEL);
109234 if (table == NULL)
109235 goto err_alloc;
109236@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
109237 table[0].data = &net->nf_frag.frags.timeout;
109238 table[1].data = &net->nf_frag.frags.low_thresh;
109239 table[2].data = &net->nf_frag.frags.high_thresh;
109240- }
109241-
109242- hdr = register_net_sysctl(net, "net/netfilter", table);
109243+ hdr = register_net_sysctl(net, "net/netfilter", table);
109244+ } else
109245+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
109246 if (hdr == NULL)
109247 goto err_reg;
109248
109249@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
109250 return 0;
109251
109252 err_reg:
109253- if (!net_eq(net, &init_net))
109254- kfree(table);
109255+ kfree(table);
109256 err_alloc:
109257 return -ENOMEM;
109258 }
109259diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
109260index 5b7a1ed..d9da205 100644
109261--- a/net/ipv6/ping.c
109262+++ b/net/ipv6/ping.c
109263@@ -240,6 +240,24 @@ static struct pernet_operations ping_v6_net_ops = {
109264 };
109265 #endif
109266
109267+static struct pingv6_ops real_pingv6_ops = {
109268+ .ipv6_recv_error = ipv6_recv_error,
109269+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
109270+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
109271+ .icmpv6_err_convert = icmpv6_err_convert,
109272+ .ipv6_icmp_error = ipv6_icmp_error,
109273+ .ipv6_chk_addr = ipv6_chk_addr,
109274+};
109275+
109276+static struct pingv6_ops dummy_pingv6_ops = {
109277+ .ipv6_recv_error = dummy_ipv6_recv_error,
109278+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
109279+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
109280+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
109281+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
109282+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
109283+};
109284+
109285 int __init pingv6_init(void)
109286 {
109287 #ifdef CONFIG_PROC_FS
109288@@ -247,13 +265,7 @@ int __init pingv6_init(void)
109289 if (ret)
109290 return ret;
109291 #endif
109292- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
109293- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
109294- pingv6_ops.ip6_datagram_recv_specific_ctl =
109295- ip6_datagram_recv_specific_ctl;
109296- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
109297- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
109298- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
109299+ pingv6_ops = &real_pingv6_ops;
109300 return inet6_register_protosw(&pingv6_protosw);
109301 }
109302
109303@@ -262,14 +274,9 @@ int __init pingv6_init(void)
109304 */
109305 void pingv6_exit(void)
109306 {
109307- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
109308- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
109309- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
109310- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
109311- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
109312- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
109313 #ifdef CONFIG_PROC_FS
109314 unregister_pernet_subsys(&ping_v6_net_ops);
109315 #endif
109316+ pingv6_ops = &dummy_pingv6_ops;
109317 inet6_unregister_protosw(&pingv6_protosw);
109318 }
109319diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
109320index 3317440..201764e 100644
109321--- a/net/ipv6/proc.c
109322+++ b/net/ipv6/proc.c
109323@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
109324 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
109325 goto proc_snmp6_fail;
109326
109327- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
109328+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
109329 if (!net->mib.proc_net_devsnmp6)
109330 goto proc_dev_snmp6_fail;
109331 return 0;
109332diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
109333index b2dc60b..a6b6c10 100644
109334--- a/net/ipv6/raw.c
109335+++ b/net/ipv6/raw.c
109336@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
109337 {
109338 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
109339 skb_checksum_complete(skb)) {
109340- atomic_inc(&sk->sk_drops);
109341+ atomic_inc_unchecked(&sk->sk_drops);
109342 kfree_skb(skb);
109343 return NET_RX_DROP;
109344 }
109345@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
109346 struct raw6_sock *rp = raw6_sk(sk);
109347
109348 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
109349- atomic_inc(&sk->sk_drops);
109350+ atomic_inc_unchecked(&sk->sk_drops);
109351 kfree_skb(skb);
109352 return NET_RX_DROP;
109353 }
109354@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
109355
109356 if (inet->hdrincl) {
109357 if (skb_checksum_complete(skb)) {
109358- atomic_inc(&sk->sk_drops);
109359+ atomic_inc_unchecked(&sk->sk_drops);
109360 kfree_skb(skb);
109361 return NET_RX_DROP;
109362 }
109363@@ -610,7 +610,7 @@ out:
109364 return err;
109365 }
109366
109367-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
109368+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
109369 struct flowi6 *fl6, struct dst_entry **dstp,
109370 unsigned int flags)
109371 {
109372@@ -916,12 +916,15 @@ do_confirm:
109373 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
109374 char __user *optval, int optlen)
109375 {
109376+ struct icmp6_filter filter;
109377+
109378 switch (optname) {
109379 case ICMPV6_FILTER:
109380 if (optlen > sizeof(struct icmp6_filter))
109381 optlen = sizeof(struct icmp6_filter);
109382- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
109383+ if (copy_from_user(&filter, optval, optlen))
109384 return -EFAULT;
109385+ raw6_sk(sk)->filter = filter;
109386 return 0;
109387 default:
109388 return -ENOPROTOOPT;
109389@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
109390 char __user *optval, int __user *optlen)
109391 {
109392 int len;
109393+ struct icmp6_filter filter;
109394
109395 switch (optname) {
109396 case ICMPV6_FILTER:
109397@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
109398 len = sizeof(struct icmp6_filter);
109399 if (put_user(len, optlen))
109400 return -EFAULT;
109401- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
109402+ filter = raw6_sk(sk)->filter;
109403+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
109404 return -EFAULT;
109405 return 0;
109406 default:
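
The seticmpfilter/geticmpfilter hunks above route the user copy through a stack-local struct and bound the length against its size before copy_to_user(). A runnable sketch of the idiom, with memcpy standing in for the user-copy helpers and the struct name made up for illustration:

	#include <string.h>

	struct filter_like { unsigned int data[8]; };

	static int set_filter(struct filter_like *dst, const void *src, size_t optlen)
	{
		struct filter_like tmp;

		if (optlen > sizeof(tmp))
			optlen = sizeof(tmp);	/* clamp caller-supplied length */
		memcpy(&tmp, src, optlen);	/* copy_from_user() in the kernel */
		*dst = tmp;			/* fixed-size whole-struct store */
		return 0;
	}

	static int get_filter(void *dst, const struct filter_like *src, size_t len)
	{
		struct filter_like tmp = *src;	/* bounce through the stack */

		if (len > sizeof(tmp))
			return -1;		/* -EFAULT in the kernel hunk */
		memcpy(dst, &tmp, len);		/* copy_to_user() in the kernel */
		return 0;
	}

The point of the bounce buffer is to give the copy a fixed-size stack object to check against, instead of a user-length copy that reads or writes the long-lived socket structure directly.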
109407diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
109408index cc85a9b..526a133 100644
109409--- a/net/ipv6/reassembly.c
109410+++ b/net/ipv6/reassembly.c
109411@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
109412
109413 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
109414 {
109415- struct ctl_table *table;
109416+ ctl_table_no_const *table = NULL;
109417 struct ctl_table_header *hdr;
109418
109419- table = ip6_frags_ns_ctl_table;
109420 if (!net_eq(net, &init_net)) {
109421- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
109422+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
109423 if (table == NULL)
109424 goto err_alloc;
109425
109426@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
109427 /* Don't export sysctls to unprivileged users */
109428 if (net->user_ns != &init_user_ns)
109429 table[0].procname = NULL;
109430- }
109431+ hdr = register_net_sysctl(net, "net/ipv6", table);
109432+ } else
109433+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
109434
109435- hdr = register_net_sysctl(net, "net/ipv6", table);
109436 if (hdr == NULL)
109437 goto err_reg;
109438
109439@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
109440 return 0;
109441
109442 err_reg:
109443- if (!net_eq(net, &init_net))
109444- kfree(table);
109445+ kfree(table);
109446 err_alloc:
109447 return -ENOMEM;
109448 }
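
This sysctl hunk is one instance of a pattern repeated throughout the patch (route.c, xfrm6_policy.c, the conntrack files further down): the template table becomes effectively read-only, only the kmemdup()'d per-namespace copy is typed ctl_table_no_const and written to, and init_net registers the template directly. Initializing table to NULL also lets the error path call kfree(table) unconditionally, since kfree(NULL) is a no-op. A compact userspace sketch of the duplicate-then-patch idea; struct and field names are illustrative:

	#include <stdlib.h>
	#include <string.h>

	struct ctl_like { const char *procname; int *data; };

	static const struct ctl_like template[] = {
		{ "ip6frag_high_thresh", NULL },
		{ NULL, NULL },			/* terminator */
	};

	static struct ctl_like *dup_for_namespace(int *ns_value)
	{
		struct ctl_like *copy = malloc(sizeof(template));

		if (!copy)
			return NULL;
		memcpy(copy, template, sizeof(template));
		copy[0].data = ns_value;	/* only the copy is written */
		return copy;
	}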
109449diff --git a/net/ipv6/route.c b/net/ipv6/route.c
109450index f23fbd2..7868241 100644
109451--- a/net/ipv6/route.c
109452+++ b/net/ipv6/route.c
109453@@ -2971,7 +2971,7 @@ struct ctl_table ipv6_route_table_template[] = {
109454
109455 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
109456 {
109457- struct ctl_table *table;
109458+ ctl_table_no_const *table;
109459
109460 table = kmemdup(ipv6_route_table_template,
109461 sizeof(ipv6_route_table_template),
109462diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
109463index 4f40817..54dcbef 100644
109464--- a/net/ipv6/sit.c
109465+++ b/net/ipv6/sit.c
109466@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
109467 static void ipip6_dev_free(struct net_device *dev);
109468 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
109469 __be32 *v4dst);
109470-static struct rtnl_link_ops sit_link_ops __read_mostly;
109471+static struct rtnl_link_ops sit_link_ops;
109472
109473 static int sit_net_id __read_mostly;
109474 struct sit_net {
109475@@ -1661,7 +1661,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
109476 unregister_netdevice_queue(dev, head);
109477 }
109478
109479-static struct rtnl_link_ops sit_link_ops __read_mostly = {
109480+static struct rtnl_link_ops sit_link_ops = {
109481 .kind = "sit",
109482 .maxtype = IFLA_IPTUN_MAX,
109483 .policy = ipip6_policy,
109484diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
109485index 058f3ec..dec973d 100644
109486--- a/net/ipv6/sysctl_net_ipv6.c
109487+++ b/net/ipv6/sysctl_net_ipv6.c
109488@@ -61,7 +61,7 @@ static struct ctl_table ipv6_rotable[] = {
109489
109490 static int __net_init ipv6_sysctl_net_init(struct net *net)
109491 {
109492- struct ctl_table *ipv6_table;
109493+ ctl_table_no_const *ipv6_table;
109494 struct ctl_table *ipv6_route_table;
109495 struct ctl_table *ipv6_icmp_table;
109496 int err;
109497diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
109498index 229239ad..ee2802f 100644
109499--- a/net/ipv6/tcp_ipv6.c
109500+++ b/net/ipv6/tcp_ipv6.c
109501@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
109502 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
109503 }
109504
109505+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109506+extern int grsec_enable_blackhole;
109507+#endif
109508+
109509 static void tcp_v6_hash(struct sock *sk)
109510 {
109511 if (sk->sk_state != TCP_CLOSE) {
109512@@ -1424,6 +1428,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
109513 return 0;
109514
109515 reset:
109516+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109517+ if (!grsec_enable_blackhole)
109518+#endif
109519 tcp_v6_send_reset(sk, skb);
109520 discard:
109521 if (opt_skb)
109522@@ -1508,12 +1515,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
109523 TCP_SKB_CB(skb)->sacked = 0;
109524
109525 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
109526- if (!sk)
109527+ if (!sk) {
109528+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109529+ ret = 1;
109530+#endif
109531 goto no_tcp_socket;
109532+ }
109533
109534 process:
109535- if (sk->sk_state == TCP_TIME_WAIT)
109536+ if (sk->sk_state == TCP_TIME_WAIT) {
109537+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109538+ ret = 2;
109539+#endif
109540 goto do_time_wait;
109541+ }
109542
109543 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
109544 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
109545@@ -1565,6 +1580,10 @@ csum_error:
109546 bad_packet:
109547 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
109548 } else {
109549+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109550+ if (!grsec_enable_blackhole || (ret == 1 &&
109551+ (skb->dev->flags & IFF_LOOPBACK)))
109552+#endif
109553 tcp_v6_send_reset(NULL, skb);
109554 }
109555
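
The tcp_ipv6.c hunks above implement the GRKERNSEC_BLACKHOLE policy for IPv6 TCP: with the sysctl enabled, no RST is emitted for packets that match no socket (ret is repurposed as a tag, 1 = no socket, 2 = TIME_WAIT), with an exemption for loopback so local tooling keeps working; the same guard suppresses the ICMPv6 port-unreachable in the UDP path below. The decision logic, condensed into a standalone sketch (the helper name is hypothetical):

	/* Returns nonzero if a TCP reset (or ICMP error) should still be sent. */
	static int should_respond(int blackhole_on, int no_socket, int loopback)
	{
		if (!blackhole_on)
			return 1;	/* feature off: standard RFC behaviour */
		if (no_socket && loopback)
			return 1;	/* never blackhole the loopback interface */
		return 0;		/* silent drop: closed ports look filtered */
	}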
109556diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
109557index 7092ff7..3fd0eb4 100644
109558--- a/net/ipv6/udp.c
109559+++ b/net/ipv6/udp.c
109560@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
109561 udp_ipv6_hash_secret + net_hash_mix(net));
109562 }
109563
109564+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109565+extern int grsec_enable_blackhole;
109566+#endif
109567+
109568 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
109569 {
109570 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
109571@@ -435,7 +439,7 @@ try_again:
109572 if (unlikely(err)) {
109573 trace_kfree_skb(skb, udpv6_recvmsg);
109574 if (!peeked) {
109575- atomic_inc(&sk->sk_drops);
109576+ atomic_inc_unchecked(&sk->sk_drops);
109577 if (is_udp4)
109578 UDP_INC_STATS_USER(sock_net(sk),
109579 UDP_MIB_INERRORS,
109580@@ -698,7 +702,7 @@ csum_error:
109581 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
109582 drop:
109583 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
109584- atomic_inc(&sk->sk_drops);
109585+ atomic_inc_unchecked(&sk->sk_drops);
109586 kfree_skb(skb);
109587 return -1;
109588 }
109589@@ -754,7 +758,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
109590 if (likely(skb1 == NULL))
109591 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
109592 if (!skb1) {
109593- atomic_inc(&sk->sk_drops);
109594+ atomic_inc_unchecked(&sk->sk_drops);
109595 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
109596 IS_UDPLITE(sk));
109597 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
109598@@ -920,6 +924,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
109599 goto csum_error;
109600
109601 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
109602+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109603+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
109604+#endif
109605 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
109606
109607 kfree_skb(skb);
109608diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
109609index 2a0bbda..d75ca57 100644
109610--- a/net/ipv6/xfrm6_policy.c
109611+++ b/net/ipv6/xfrm6_policy.c
109612@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
109613 }
109614 }
109615
109616-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
109617+static int xfrm6_garbage_collect(struct dst_ops *ops)
109618 {
109619 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
109620
109621- xfrm6_policy_afinfo.garbage_collect(net);
109622+ xfrm_garbage_collect_deferred(net);
109623 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
109624 }
109625
109626@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
109627
109628 static int __net_init xfrm6_net_init(struct net *net)
109629 {
109630- struct ctl_table *table;
109631+ ctl_table_no_const *table = NULL;
109632 struct ctl_table_header *hdr;
109633
109634- table = xfrm6_policy_table;
109635 if (!net_eq(net, &init_net)) {
109636- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
109637+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
109638 if (!table)
109639 goto err_alloc;
109640
109641 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
109642- }
109643+ hdr = register_net_sysctl(net, "net/ipv6", table);
109644+ } else
109645+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
109646
109647- hdr = register_net_sysctl(net, "net/ipv6", table);
109648 if (!hdr)
109649 goto err_reg;
109650
109651@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
109652 return 0;
109653
109654 err_reg:
109655- if (!net_eq(net, &init_net))
109656- kfree(table);
109657+ kfree(table);
109658 err_alloc:
109659 return -ENOMEM;
109660 }
109661diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
109662index e15c16a..7cf07aa 100644
109663--- a/net/ipx/ipx_proc.c
109664+++ b/net/ipx/ipx_proc.c
109665@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
109666 struct proc_dir_entry *p;
109667 int rc = -ENOMEM;
109668
109669- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
109670+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
109671
109672 if (!ipx_proc_dir)
109673 goto out;
109674diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
109675index 2ba8b97..6d33010 100644
109676--- a/net/irda/ircomm/ircomm_tty.c
109677+++ b/net/irda/ircomm/ircomm_tty.c
109678@@ -317,11 +317,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
109679 add_wait_queue(&port->open_wait, &wait);
109680
109681 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
109682- __FILE__, __LINE__, tty->driver->name, port->count);
109683+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
109684
109685 spin_lock_irqsave(&port->lock, flags);
109686 if (!tty_hung_up_p(filp))
109687- port->count--;
109688+ atomic_dec(&port->count);
109689 port->blocked_open++;
109690 spin_unlock_irqrestore(&port->lock, flags);
109691
109692@@ -356,7 +356,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
109693 }
109694
109695 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
109696- __FILE__, __LINE__, tty->driver->name, port->count);
109697+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
109698
109699 schedule();
109700 }
109701@@ -366,12 +366,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
109702
109703 spin_lock_irqsave(&port->lock, flags);
109704 if (!tty_hung_up_p(filp))
109705- port->count++;
109706+ atomic_inc(&port->count);
109707 port->blocked_open--;
109708 spin_unlock_irqrestore(&port->lock, flags);
109709
109710 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
109711- __FILE__, __LINE__, tty->driver->name, port->count);
109712+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
109713
109714 if (!retval)
109715 port->flags |= ASYNC_NORMAL_ACTIVE;
109716@@ -445,12 +445,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
109717
109718 /* ++ is not atomic, so this should be protected - Jean II */
109719 spin_lock_irqsave(&self->port.lock, flags);
109720- self->port.count++;
109721+ atomic_inc(&self->port.count);
109722 spin_unlock_irqrestore(&self->port.lock, flags);
109723 tty_port_tty_set(&self->port, tty);
109724
109725 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
109726- self->line, self->port.count);
109727+ self->line, atomic_read(&self->port.count));
109728
109729 /* Not really used by us, but lets do it anyway */
109730 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
109731@@ -987,7 +987,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
109732 tty_kref_put(port->tty);
109733 }
109734 port->tty = NULL;
109735- port->count = 0;
109736+ atomic_set(&port->count, 0);
109737 spin_unlock_irqrestore(&port->lock, flags);
109738
109739 wake_up_interruptible(&port->open_wait);
109740@@ -1344,7 +1344,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
109741 seq_putc(m, '\n');
109742
109743 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
109744- seq_printf(m, "Open count: %d\n", self->port.count);
109745+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
109746 seq_printf(m, "Max data size: %d\n", self->max_data_size);
109747 seq_printf(m, "Max header size: %d\n", self->max_header_size);
109748
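
port->count in ircomm_tty is modified under port->lock but also read from paths that take no lock (the debug prints and the proc output above), so the patch converts it to atomic_t and reads it with atomic_read(). A C11 analogue of the converted field, for illustration only:

	#include <stdatomic.h>

	struct tty_port_like {
		atomic_int count;	/* was: int, torn reads possible locklessly */
	};

	static void port_open(struct tty_port_like *p)  { atomic_fetch_add(&p->count, 1); }
	static void port_close(struct tty_port_like *p) { atomic_fetch_sub(&p->count, 1); }
	static int  port_count(struct tty_port_like *p) { return atomic_load(&p->count); }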
109749diff --git a/net/irda/irproc.c b/net/irda/irproc.c
109750index b9ac598..f88cc56 100644
109751--- a/net/irda/irproc.c
109752+++ b/net/irda/irproc.c
109753@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
109754 {
109755 int i;
109756
109757- proc_irda = proc_mkdir("irda", init_net.proc_net);
109758+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
109759 if (proc_irda == NULL)
109760 return;
109761
109762diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
109763index 7a95fa4..57be196 100644
109764--- a/net/iucv/af_iucv.c
109765+++ b/net/iucv/af_iucv.c
109766@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
109767 {
109768 char name[12];
109769
109770- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
109771+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
109772 while (__iucv_get_sock_by_name(name)) {
109773 sprintf(name, "%08x",
109774- atomic_inc_return(&iucv_sk_list.autobind_name));
109775+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
109776 }
109777 memcpy(iucv->src_name, name, 8);
109778 }
109779diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
109780index da78793..bdd78cf 100644
109781--- a/net/iucv/iucv.c
109782+++ b/net/iucv/iucv.c
109783@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
109784 return NOTIFY_OK;
109785 }
109786
109787-static struct notifier_block __refdata iucv_cpu_notifier = {
109788+static struct notifier_block iucv_cpu_notifier = {
109789 .notifier_call = iucv_cpu_notify,
109790 };
109791
109792diff --git a/net/key/af_key.c b/net/key/af_key.c
109793index ba2a2f9..b658bc3 100644
109794--- a/net/key/af_key.c
109795+++ b/net/key/af_key.c
109796@@ -3052,10 +3052,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
109797 static u32 get_acqseq(void)
109798 {
109799 u32 res;
109800- static atomic_t acqseq;
109801+ static atomic_unchecked_t acqseq;
109802
109803 do {
109804- res = atomic_inc_return(&acqseq);
109805+ res = atomic_inc_return_unchecked(&acqseq);
109806 } while (!res);
109807 return res;
109808 }
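
get_acqseq() reserves zero as the "no sequence number" value, so the increment loops until it yields a nonzero result after wraparound; the hunk only swaps in the _unchecked counter so that this deliberate wrap does not trip the overflow detector. A runnable equivalent of the skip-zero generator:

	#include <stdatomic.h>

	static atomic_uint acqseq;

	static unsigned int get_acqseq_like(void)
	{
		unsigned int res;

		do {
			/* atomic_inc_return() equivalent: yields the new value */
			res = atomic_fetch_add(&acqseq, 1) + 1;
		} while (!res);		/* skip 0 on wraparound */
		return res;
	}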
109809diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
109810index 76125c5..e474828 100644
109811--- a/net/l2tp/l2tp_eth.c
109812+++ b/net/l2tp/l2tp_eth.c
109813@@ -42,12 +42,12 @@ struct l2tp_eth {
109814 struct sock *tunnel_sock;
109815 struct l2tp_session *session;
109816 struct list_head list;
109817- atomic_long_t tx_bytes;
109818- atomic_long_t tx_packets;
109819- atomic_long_t tx_dropped;
109820- atomic_long_t rx_bytes;
109821- atomic_long_t rx_packets;
109822- atomic_long_t rx_errors;
109823+ atomic_long_unchecked_t tx_bytes;
109824+ atomic_long_unchecked_t tx_packets;
109825+ atomic_long_unchecked_t tx_dropped;
109826+ atomic_long_unchecked_t rx_bytes;
109827+ atomic_long_unchecked_t rx_packets;
109828+ atomic_long_unchecked_t rx_errors;
109829 };
109830
109831 /* via l2tp_session_priv() */
109832@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
109833 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
109834
109835 if (likely(ret == NET_XMIT_SUCCESS)) {
109836- atomic_long_add(len, &priv->tx_bytes);
109837- atomic_long_inc(&priv->tx_packets);
109838+ atomic_long_add_unchecked(len, &priv->tx_bytes);
109839+ atomic_long_inc_unchecked(&priv->tx_packets);
109840 } else {
109841- atomic_long_inc(&priv->tx_dropped);
109842+ atomic_long_inc_unchecked(&priv->tx_dropped);
109843 }
109844 return NETDEV_TX_OK;
109845 }
109846@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
109847 {
109848 struct l2tp_eth *priv = netdev_priv(dev);
109849
109850- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
109851- stats->tx_packets = atomic_long_read(&priv->tx_packets);
109852- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
109853- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
109854- stats->rx_packets = atomic_long_read(&priv->rx_packets);
109855- stats->rx_errors = atomic_long_read(&priv->rx_errors);
109856+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
109857+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
109858+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
109859+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
109860+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
109861+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
109862 return stats;
109863 }
109864
109865@@ -166,15 +166,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
109866 nf_reset(skb);
109867
109868 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
109869- atomic_long_inc(&priv->rx_packets);
109870- atomic_long_add(data_len, &priv->rx_bytes);
109871+ atomic_long_inc_unchecked(&priv->rx_packets);
109872+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
109873 } else {
109874- atomic_long_inc(&priv->rx_errors);
109875+ atomic_long_inc_unchecked(&priv->rx_errors);
109876 }
109877 return;
109878
109879 error:
109880- atomic_long_inc(&priv->rx_errors);
109881+ atomic_long_inc_unchecked(&priv->rx_errors);
109882 kfree_skb(skb);
109883 }
109884
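
The l2tp_eth statistics are pure monotonic counters whose wraparound is harmless, which is what the *_unchecked atomic variants exist for: the same operations, but exempt from the PaX REFCOUNT overflow trap that guards real reference counts. The shape of the type, per the PaX design (sketched here as an assumption, not copied from this patch's headers):

	/* Layout-identical to atomic_long_t; the distinct type is what tells
	 * the instrumentation not to trap on overflow. */
	typedef struct {
		long counter;
	} atomic_long_unchecked_t;

	static inline void atomic_long_inc_unchecked_like(atomic_long_unchecked_t *v)
	{
		__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	}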
109885diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
109886index 1a3c7e0..80f8b0c 100644
109887--- a/net/llc/llc_proc.c
109888+++ b/net/llc/llc_proc.c
109889@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
109890 int rc = -ENOMEM;
109891 struct proc_dir_entry *p;
109892
109893- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
109894+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
109895 if (!llc_proc_dir)
109896 goto out;
109897
109898diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
109899index 592f4b1..efa7aa9 100644
109900--- a/net/mac80211/cfg.c
109901+++ b/net/mac80211/cfg.c
109902@@ -864,7 +864,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
109903 ret = ieee80211_vif_use_channel(sdata, chandef,
109904 IEEE80211_CHANCTX_EXCLUSIVE);
109905 }
109906- } else if (local->open_count == local->monitors) {
109907+ } else if (local_read(&local->open_count) == local->monitors) {
109908 local->_oper_chandef = *chandef;
109909 ieee80211_hw_config(local, 0);
109910 }
109911@@ -3574,7 +3574,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
109912 else
109913 local->probe_req_reg--;
109914
109915- if (!local->open_count)
109916+ if (!local_read(&local->open_count))
109917 break;
109918
109919 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
109920@@ -3723,8 +3723,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
109921 if (chanctx_conf) {
109922 *chandef = chanctx_conf->def;
109923 ret = 0;
109924- } else if (local->open_count > 0 &&
109925- local->open_count == local->monitors &&
109926+ } else if (local_read(&local->open_count) > 0 &&
109927+ local_read(&local->open_count) == local->monitors &&
109928 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
109929 if (local->use_chanctx)
109930 *chandef = local->monitor_chandef;
109931diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
109932index ac9836e..32613c1 100644
109933--- a/net/mac80211/ieee80211_i.h
109934+++ b/net/mac80211/ieee80211_i.h
109935@@ -28,6 +28,7 @@
109936 #include <net/ieee80211_radiotap.h>
109937 #include <net/cfg80211.h>
109938 #include <net/mac80211.h>
109939+#include <asm/local.h>
109940 #include "key.h"
109941 #include "sta_info.h"
109942 #include "debug.h"
109943@@ -1011,7 +1012,7 @@ struct ieee80211_local {
109944 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
109945 spinlock_t queue_stop_reason_lock;
109946
109947- int open_count;
109948+ local_t open_count;
109949 int monitors, cooked_mntrs;
109950 /* number of interfaces with corresponding FIF_ flags */
109951 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
109952diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
109953index 388b863..6575b55 100644
109954--- a/net/mac80211/iface.c
109955+++ b/net/mac80211/iface.c
109956@@ -531,7 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
109957 break;
109958 }
109959
109960- if (local->open_count == 0) {
109961+ if (local_read(&local->open_count) == 0) {
109962 res = drv_start(local);
109963 if (res)
109964 goto err_del_bss;
109965@@ -578,7 +578,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
109966 res = drv_add_interface(local, sdata);
109967 if (res)
109968 goto err_stop;
109969- } else if (local->monitors == 0 && local->open_count == 0) {
109970+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
109971 res = ieee80211_add_virtual_monitor(local);
109972 if (res)
109973 goto err_stop;
109974@@ -687,7 +687,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
109975 atomic_inc(&local->iff_promiscs);
109976
109977 if (coming_up)
109978- local->open_count++;
109979+ local_inc(&local->open_count);
109980
109981 if (hw_reconf_flags)
109982 ieee80211_hw_config(local, hw_reconf_flags);
109983@@ -725,7 +725,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
109984 err_del_interface:
109985 drv_remove_interface(local, sdata);
109986 err_stop:
109987- if (!local->open_count)
109988+ if (!local_read(&local->open_count))
109989 drv_stop(local);
109990 err_del_bss:
109991 sdata->bss = NULL;
109992@@ -888,7 +888,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
109993 }
109994
109995 if (going_down)
109996- local->open_count--;
109997+ local_dec(&local->open_count);
109998
109999 switch (sdata->vif.type) {
110000 case NL80211_IFTYPE_AP_VLAN:
110001@@ -949,7 +949,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110002 }
110003 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
110004
110005- if (local->open_count == 0)
110006+ if (local_read(&local->open_count) == 0)
110007 ieee80211_clear_tx_pending(local);
110008
110009 /*
110010@@ -989,7 +989,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110011
110012 ieee80211_recalc_ps(local, -1);
110013
110014- if (local->open_count == 0) {
110015+ if (local_read(&local->open_count) == 0) {
110016 ieee80211_stop_device(local);
110017
110018 /* no reconfiguring after stop! */
110019@@ -1000,7 +1000,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110020 ieee80211_configure_filter(local);
110021 ieee80211_hw_config(local, hw_reconf_flags);
110022
110023- if (local->monitors == local->open_count)
110024+ if (local->monitors == local_read(&local->open_count))
110025 ieee80211_add_virtual_monitor(local);
110026 }
110027
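
mac80211's open_count is updated in do_open/do_stop but read from several paths that do not take the same locks, so the patch turns the plain int into a local_t and routes every access through local_read()/local_inc()/local_dec(). A kernel-style sketch of the converted accessors; this is the real <asm/local.h> API, shown out of context:

	#include <asm/local.h>

	static local_t open_count = LOCAL_INIT(0);

	static void iface_up(void)    { local_inc(&open_count); }
	static void iface_down(void)  { local_dec(&open_count); }
	static long ifaces_open(void) { return local_read(&open_count); }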
110028diff --git a/net/mac80211/main.c b/net/mac80211/main.c
110029index d17c26d..43d6bfb 100644
110030--- a/net/mac80211/main.c
110031+++ b/net/mac80211/main.c
110032@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
110033 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
110034 IEEE80211_CONF_CHANGE_POWER);
110035
110036- if (changed && local->open_count) {
110037+ if (changed && local_read(&local->open_count)) {
110038 ret = drv_config(local, changed);
110039 /*
110040 * Goal:
110041diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
110042index d478b88..8c8d157 100644
110043--- a/net/mac80211/pm.c
110044+++ b/net/mac80211/pm.c
110045@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
110046 struct ieee80211_sub_if_data *sdata;
110047 struct sta_info *sta;
110048
110049- if (!local->open_count)
110050+ if (!local_read(&local->open_count))
110051 goto suspend;
110052
110053 ieee80211_scan_cancel(local);
110054@@ -58,7 +58,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
110055 cancel_work_sync(&local->dynamic_ps_enable_work);
110056 del_timer_sync(&local->dynamic_ps_timer);
110057
110058- local->wowlan = wowlan && local->open_count;
110059+ local->wowlan = wowlan && local_read(&local->open_count);
110060 if (local->wowlan) {
110061 int err = drv_suspend(local, wowlan);
110062 if (err < 0) {
110063@@ -123,7 +123,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
110064 WARN_ON(!list_empty(&local->chanctx_list));
110065
110066 /* stop hardware - this must stop RX */
110067- if (local->open_count)
110068+ if (local_read(&local->open_count))
110069 ieee80211_stop_device(local);
110070
110071 suspend:
110072diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
110073index 8fdadfd..a4f72b8 100644
110074--- a/net/mac80211/rate.c
110075+++ b/net/mac80211/rate.c
110076@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
110077
110078 ASSERT_RTNL();
110079
110080- if (local->open_count)
110081+ if (local_read(&local->open_count))
110082 return -EBUSY;
110083
110084 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
110085diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
110086index 6ff1346..936ca9a 100644
110087--- a/net/mac80211/rc80211_pid_debugfs.c
110088+++ b/net/mac80211/rc80211_pid_debugfs.c
110089@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
110090
110091 spin_unlock_irqrestore(&events->lock, status);
110092
110093- if (copy_to_user(buf, pb, p))
110094+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
110095 return -EFAULT;
110096
110097 return p;
110098diff --git a/net/mac80211/util.c b/net/mac80211/util.c
110099index a6cda52..f3b6776 100644
110100--- a/net/mac80211/util.c
110101+++ b/net/mac80211/util.c
110102@@ -1548,7 +1548,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
110103 }
110104 #endif
110105 /* everything else happens only if HW was up & running */
110106- if (!local->open_count)
110107+ if (!local_read(&local->open_count))
110108 goto wake_up;
110109
110110 /*
110111@@ -1772,7 +1772,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
110112 local->in_reconfig = false;
110113 barrier();
110114
110115- if (local->monitors == local->open_count && local->monitors > 0)
110116+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
110117 ieee80211_add_virtual_monitor(local);
110118
110119 /*
110120diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
110121index e9410d1..77b6378 100644
110122--- a/net/netfilter/Kconfig
110123+++ b/net/netfilter/Kconfig
110124@@ -1081,6 +1081,16 @@ config NETFILTER_XT_MATCH_ESP
110125
110126 To compile it as a module, choose M here. If unsure, say N.
110127
110128+config NETFILTER_XT_MATCH_GRADM
110129+ tristate '"gradm" match support'
110130+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
110131+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
110132+ ---help---
110133+	  The gradm match allows matching on whether the grsecurity RBAC
110134+	  system is enabled.  It is useful when iptables rules are applied
110135+	  early during boot to block connections to the machine (except
110136+	  from a trusted host) while the RBAC system is still disabled.
110137+
110138 config NETFILTER_XT_MATCH_HASHLIMIT
110139 tristate '"hashlimit" match support'
110140 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
110141diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
110142index bffdad7..f9317d1 100644
110143--- a/net/netfilter/Makefile
110144+++ b/net/netfilter/Makefile
110145@@ -133,6 +133,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
110146 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
110147 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
110148 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
110149+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
110150 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
110151 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
110152 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
110153diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
110154index ec8114f..6b2bfba 100644
110155--- a/net/netfilter/ipset/ip_set_core.c
110156+++ b/net/netfilter/ipset/ip_set_core.c
110157@@ -1921,7 +1921,7 @@ done:
110158 return ret;
110159 }
110160
110161-static struct nf_sockopt_ops so_set __read_mostly = {
110162+static struct nf_sockopt_ops so_set = {
110163 .pf = PF_INET,
110164 .get_optmin = SO_IP_SET,
110165 .get_optmax = SO_IP_SET + 1,
110166diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
110167index 610e19c..08d0c3f 100644
110168--- a/net/netfilter/ipvs/ip_vs_conn.c
110169+++ b/net/netfilter/ipvs/ip_vs_conn.c
110170@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
110171 /* Increase the refcnt counter of the dest */
110172 ip_vs_dest_hold(dest);
110173
110174- conn_flags = atomic_read(&dest->conn_flags);
110175+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
110176 if (cp->protocol != IPPROTO_UDP)
110177 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
110178 flags = cp->flags;
110179@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
110180
110181 cp->control = NULL;
110182 atomic_set(&cp->n_control, 0);
110183- atomic_set(&cp->in_pkts, 0);
110184+ atomic_set_unchecked(&cp->in_pkts, 0);
110185
110186 cp->packet_xmit = NULL;
110187 cp->app = NULL;
110188@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
110189
110190 /* Don't drop the entry if its number of incoming packets is not
110191 located in [0, 8] */
110192- i = atomic_read(&cp->in_pkts);
110193+ i = atomic_read_unchecked(&cp->in_pkts);
110194 if (i > 8 || i < 0) return 0;
110195
110196 if (!todrop_rate[i]) return 0;
110197diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
110198index e683675..67cb16b 100644
110199--- a/net/netfilter/ipvs/ip_vs_core.c
110200+++ b/net/netfilter/ipvs/ip_vs_core.c
110201@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
110202 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
110203 /* do not touch skb anymore */
110204
110205- atomic_inc(&cp->in_pkts);
110206+ atomic_inc_unchecked(&cp->in_pkts);
110207 ip_vs_conn_put(cp);
110208 return ret;
110209 }
110210@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
110211 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
110212 pkts = sysctl_sync_threshold(ipvs);
110213 else
110214- pkts = atomic_add_return(1, &cp->in_pkts);
110215+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
110216
110217 if (ipvs->sync_state & IP_VS_STATE_MASTER)
110218 ip_vs_sync_conn(net, cp, pkts);
110219diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
110220index 581a658..910e112 100644
110221--- a/net/netfilter/ipvs/ip_vs_ctl.c
110222+++ b/net/netfilter/ipvs/ip_vs_ctl.c
110223@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
110224 */
110225 ip_vs_rs_hash(ipvs, dest);
110226 }
110227- atomic_set(&dest->conn_flags, conn_flags);
110228+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
110229
110230 /* bind the service */
110231 old_svc = rcu_dereference_protected(dest->svc, 1);
110232@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
110233 * align with netns init in ip_vs_control_net_init()
110234 */
110235
110236-static struct ctl_table vs_vars[] = {
110237+static ctl_table_no_const vs_vars[] __read_only = {
110238 {
110239 .procname = "amemthresh",
110240 .maxlen = sizeof(int),
110241@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
110242 " %-7s %-6d %-10d %-10d\n",
110243 &dest->addr.in6,
110244 ntohs(dest->port),
110245- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
110246+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
110247 atomic_read(&dest->weight),
110248 atomic_read(&dest->activeconns),
110249 atomic_read(&dest->inactconns));
110250@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
110251 "%-7s %-6d %-10d %-10d\n",
110252 ntohl(dest->addr.ip),
110253 ntohs(dest->port),
110254- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
110255+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
110256 atomic_read(&dest->weight),
110257 atomic_read(&dest->activeconns),
110258 atomic_read(&dest->inactconns));
110259@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
110260
110261 entry.addr = dest->addr.ip;
110262 entry.port = dest->port;
110263- entry.conn_flags = atomic_read(&dest->conn_flags);
110264+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
110265 entry.weight = atomic_read(&dest->weight);
110266 entry.u_threshold = dest->u_threshold;
110267 entry.l_threshold = dest->l_threshold;
110268@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
110269 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
110270 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
110271 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
110272- (atomic_read(&dest->conn_flags) &
110273+ (atomic_read_unchecked(&dest->conn_flags) &
110274 IP_VS_CONN_F_FWD_MASK)) ||
110275 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
110276 atomic_read(&dest->weight)) ||
110277@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
110278 {
110279 int idx;
110280 struct netns_ipvs *ipvs = net_ipvs(net);
110281- struct ctl_table *tbl;
110282+ ctl_table_no_const *tbl;
110283
110284 atomic_set(&ipvs->dropentry, 0);
110285 spin_lock_init(&ipvs->dropentry_lock);
110286diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
110287index 547ff33..c8c8117 100644
110288--- a/net/netfilter/ipvs/ip_vs_lblc.c
110289+++ b/net/netfilter/ipvs/ip_vs_lblc.c
110290@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
110291 * IPVS LBLC sysctl table
110292 */
110293 #ifdef CONFIG_SYSCTL
110294-static struct ctl_table vs_vars_table[] = {
110295+static ctl_table_no_const vs_vars_table[] __read_only = {
110296 {
110297 .procname = "lblc_expiration",
110298 .data = NULL,
110299diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
110300index 3f21a2f..a112e85 100644
110301--- a/net/netfilter/ipvs/ip_vs_lblcr.c
110302+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
110303@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
110304 * IPVS LBLCR sysctl table
110305 */
110306
110307-static struct ctl_table vs_vars_table[] = {
110308+static ctl_table_no_const vs_vars_table[] __read_only = {
110309 {
110310 .procname = "lblcr_expiration",
110311 .data = NULL,
110312diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
110313index db80126..ef7110e 100644
110314--- a/net/netfilter/ipvs/ip_vs_sync.c
110315+++ b/net/netfilter/ipvs/ip_vs_sync.c
110316@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
110317 cp = cp->control;
110318 if (cp) {
110319 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
110320- pkts = atomic_add_return(1, &cp->in_pkts);
110321+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
110322 else
110323 pkts = sysctl_sync_threshold(ipvs);
110324 ip_vs_sync_conn(net, cp->control, pkts);
110325@@ -771,7 +771,7 @@ control:
110326 if (!cp)
110327 return;
110328 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
110329- pkts = atomic_add_return(1, &cp->in_pkts);
110330+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
110331 else
110332 pkts = sysctl_sync_threshold(ipvs);
110333 goto sloop;
110334@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
110335
110336 if (opt)
110337 memcpy(&cp->in_seq, opt, sizeof(*opt));
110338- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
110339+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
110340 cp->state = state;
110341 cp->old_state = cp->state;
110342 /*
110343diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
110344index 73ba1cc..1adfc7a 100644
110345--- a/net/netfilter/ipvs/ip_vs_xmit.c
110346+++ b/net/netfilter/ipvs/ip_vs_xmit.c
110347@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
110348 else
110349 rc = NF_ACCEPT;
110350 /* do not touch skb anymore */
110351- atomic_inc(&cp->in_pkts);
110352+ atomic_inc_unchecked(&cp->in_pkts);
110353 goto out;
110354 }
110355
110356@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
110357 else
110358 rc = NF_ACCEPT;
110359 /* do not touch skb anymore */
110360- atomic_inc(&cp->in_pkts);
110361+ atomic_inc_unchecked(&cp->in_pkts);
110362 goto out;
110363 }
110364
110365diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
110366index a4b5e2a..13b1de3 100644
110367--- a/net/netfilter/nf_conntrack_acct.c
110368+++ b/net/netfilter/nf_conntrack_acct.c
110369@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
110370 #ifdef CONFIG_SYSCTL
110371 static int nf_conntrack_acct_init_sysctl(struct net *net)
110372 {
110373- struct ctl_table *table;
110374+ ctl_table_no_const *table;
110375
110376 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
110377 GFP_KERNEL);
110378diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
110379index 1f4f954..e364ad7 100644
110380--- a/net/netfilter/nf_conntrack_core.c
110381+++ b/net/netfilter/nf_conntrack_core.c
110382@@ -1789,6 +1789,10 @@ void nf_conntrack_init_end(void)
110383 #define DYING_NULLS_VAL ((1<<30)+1)
110384 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
110385
110386+#ifdef CONFIG_GRKERNSEC_HIDESYM
110387+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
110388+#endif
110389+
110390 int nf_conntrack_init_net(struct net *net)
110391 {
110392 int ret = -ENOMEM;
110393@@ -1814,7 +1818,11 @@ int nf_conntrack_init_net(struct net *net)
110394 if (!net->ct.stat)
110395 goto err_pcpu_lists;
110396
110397+#ifdef CONFIG_GRKERNSEC_HIDESYM
110398+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
110399+#else
110400 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
110401+#endif
110402 if (!net->ct.slabname)
110403 goto err_slabname;
110404
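
Under GRKERNSEC_HIDESYM the per-netns conntrack cache is named from a monotonic id instead of the net pointer, since the "%p"-based name would otherwise leak a kernel address through /proc/slabinfo. A userspace analogue of the naming change:

	#include <stdio.h>
	#include <stdatomic.h>

	static atomic_uint cache_id;

	static int name_cache(char *buf, size_t len)
	{
		/* "nf_conntrack_%08x" from a counter: unique, reveals nothing */
		return snprintf(buf, len, "nf_conntrack_%08x",
				atomic_fetch_add(&cache_id, 1) + 1);
	}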
110405diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
110406index 1df1761..ce8b88a 100644
110407--- a/net/netfilter/nf_conntrack_ecache.c
110408+++ b/net/netfilter/nf_conntrack_ecache.c
110409@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
110410 #ifdef CONFIG_SYSCTL
110411 static int nf_conntrack_event_init_sysctl(struct net *net)
110412 {
110413- struct ctl_table *table;
110414+ ctl_table_no_const *table;
110415
110416 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
110417 GFP_KERNEL);
110418diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
110419index 5b3eae7..dd4b8fe 100644
110420--- a/net/netfilter/nf_conntrack_helper.c
110421+++ b/net/netfilter/nf_conntrack_helper.c
110422@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
110423
110424 static int nf_conntrack_helper_init_sysctl(struct net *net)
110425 {
110426- struct ctl_table *table;
110427+ ctl_table_no_const *table;
110428
110429 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
110430 GFP_KERNEL);
110431diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
110432index b65d586..beec902 100644
110433--- a/net/netfilter/nf_conntrack_proto.c
110434+++ b/net/netfilter/nf_conntrack_proto.c
110435@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
110436
110437 static void
110438 nf_ct_unregister_sysctl(struct ctl_table_header **header,
110439- struct ctl_table **table,
110440+ ctl_table_no_const **table,
110441 unsigned int users)
110442 {
110443 if (users > 0)
110444diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
110445index f641751..d3c5b51 100644
110446--- a/net/netfilter/nf_conntrack_standalone.c
110447+++ b/net/netfilter/nf_conntrack_standalone.c
110448@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
110449
110450 static int nf_conntrack_standalone_init_sysctl(struct net *net)
110451 {
110452- struct ctl_table *table;
110453+ ctl_table_no_const *table;
110454
110455 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
110456 GFP_KERNEL);
110457diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
110458index 7a394df..bd91a8a 100644
110459--- a/net/netfilter/nf_conntrack_timestamp.c
110460+++ b/net/netfilter/nf_conntrack_timestamp.c
110461@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
110462 #ifdef CONFIG_SYSCTL
110463 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
110464 {
110465- struct ctl_table *table;
110466+ ctl_table_no_const *table;
110467
110468 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
110469 GFP_KERNEL);
110470diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
110471index 85296d4..8becdec 100644
110472--- a/net/netfilter/nf_log.c
110473+++ b/net/netfilter/nf_log.c
110474@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
110475
110476 #ifdef CONFIG_SYSCTL
110477 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
110478-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
110479+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
110480
110481 static int nf_log_proc_dostring(struct ctl_table *table, int write,
110482 void __user *buffer, size_t *lenp, loff_t *ppos)
110483@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
110484 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
110485 mutex_unlock(&nf_log_mutex);
110486 } else {
110487+ ctl_table_no_const nf_log_table = *table;
110488+
110489 mutex_lock(&nf_log_mutex);
110490 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
110491 lockdep_is_held(&nf_log_mutex));
110492 if (!logger)
110493- table->data = "NONE";
110494+ nf_log_table.data = "NONE";
110495 else
110496- table->data = logger->name;
110497- r = proc_dostring(table, write, buffer, lenp, ppos);
110498+ nf_log_table.data = logger->name;
110499+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
110500 mutex_unlock(&nf_log_mutex);
110501 }
110502
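
nf_log_proc_dostring() used to point table->data at the current logger name before calling proc_dostring(); with the table now read-only, that write has to land in a private copy, so the read path builds a stack-local ctl_table and passes that instead. The idiom, reduced to a sketch with illustrative types:

	struct entry_like { const char *name; const void *data; };

	static void show_one(const struct entry_like *shared, const char *logger_name)
	{
		struct entry_like tmp = *shared;	/* private, writable copy */

		tmp.data = logger_name ? logger_name : "NONE";
		/* ... hand &tmp to the generic handler (proc_dostring above) ... */
	}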
110503diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
110504index f042ae5..30ea486 100644
110505--- a/net/netfilter/nf_sockopt.c
110506+++ b/net/netfilter/nf_sockopt.c
110507@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
110508 }
110509 }
110510
110511- list_add(&reg->list, &nf_sockopts);
110512+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
110513 out:
110514 mutex_unlock(&nf_sockopt_mutex);
110515 return ret;
110516@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
110517 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
110518 {
110519 mutex_lock(&nf_sockopt_mutex);
110520- list_del(&reg->list);
110521+ pax_list_del((struct list_head *)&reg->list);
110522 mutex_unlock(&nf_sockopt_mutex);
110523 }
110524 EXPORT_SYMBOL(nf_unregister_sockopt);
110525diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
110526index d292c8d..9f1e166 100644
110527--- a/net/netfilter/nfnetlink_log.c
110528+++ b/net/netfilter/nfnetlink_log.c
110529@@ -79,7 +79,7 @@ static int nfnl_log_net_id __read_mostly;
110530 struct nfnl_log_net {
110531 spinlock_t instances_lock;
110532 struct hlist_head instance_table[INSTANCE_BUCKETS];
110533- atomic_t global_seq;
110534+ atomic_unchecked_t global_seq;
110535 };
110536
110537 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
110538@@ -561,7 +561,7 @@ __build_packet_message(struct nfnl_log_net *log,
110539 /* global sequence number */
110540 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
110541 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
110542- htonl(atomic_inc_return(&log->global_seq))))
110543+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
110544 goto nla_put_failure;
110545
110546 if (data_len) {
110547diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
110548index 1840989..6895744 100644
110549--- a/net/netfilter/nft_compat.c
110550+++ b/net/netfilter/nft_compat.c
110551@@ -225,7 +225,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
110552 /* We want to reuse existing compat_to_user */
110553 old_fs = get_fs();
110554 set_fs(KERNEL_DS);
110555- t->compat_to_user(out, in);
110556+ t->compat_to_user((void __force_user *)out, in);
110557 set_fs(old_fs);
110558 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
110559 kfree(out);
110560@@ -421,7 +421,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
110561 /* We want to reuse existing compat_to_user */
110562 old_fs = get_fs();
110563 set_fs(KERNEL_DS);
110564- m->compat_to_user(out, in);
110565+ m->compat_to_user((void __force_user *)out, in);
110566 set_fs(old_fs);
110567 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
110568 kfree(out);
110569diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
110570index bbffdbda..12d4da8 100644
110571--- a/net/netfilter/xt_bpf.c
110572+++ b/net/netfilter/xt_bpf.c
110573@@ -23,11 +23,10 @@ MODULE_ALIAS("ip6t_bpf");
110574 static int bpf_mt_check(const struct xt_mtchk_param *par)
110575 {
110576 struct xt_bpf_info *info = par->matchinfo;
110577- struct sock_fprog_kern program;
110578+ struct sock_fprog program;
110579
110580 program.len = info->bpf_program_num_elem;
110581- program.filter = info->bpf_program;
110582-
110583+ program.filter = (struct sock_filter __user *) info->bpf_program;
110584 if (sk_unattached_filter_create(&info->filter, &program)) {
110585 pr_info("bpf: check failed: parse error\n");
110586 return -EINVAL;
110587diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
110588new file mode 100644
110589index 0000000..c566332
110590--- /dev/null
110591+++ b/net/netfilter/xt_gradm.c
110592@@ -0,0 +1,51 @@
110593+/*
110594+ * gradm match for netfilter
110595